Example 1
def compiler_config_files():
    config_files = list()
    config = spack.config.config
    for scope in config.file_scopes:
        name = scope.name
        compiler_config = config.get('compilers', scope=name)
        if compiler_config:
            config_files.append(config.get_config_filename(name, 'compilers'))
    return config_files
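A minimal usage sketch for the helper above; nothing beyond the function shown is assumed, and the output is simply whichever scope files currently define a compilers: section:

# Print every configuration file that currently defines compilers,
# e.g. to inspect them before editing or removing compiler entries.
for path in compiler_config_files():
    print(path)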
Example 2
def add_remote_packages(remote, exclude=[], nostack=False, hardlinks=False):
    """
        Add all installed packages in `remote` to the packages dictionary.

        If nostack == True, packages will not be re-linked if they exist.

        If hardlinks == True, packages will be hard-linked. Not recommended!
    """
    config = spack.config.get_config("config")

    # NOTE: This has to be kept in sync with spack/store.py!
    layout = spack.directory_layout.YamlDirectoryLayout(
        canonicalize_path(osp.join(remote, 'opt', 'spack')),
        hash_len=config.get('install_hash_length'),
        path_scheme=config.get('install_path_scheme'))

    num_packages = 0

    for spec in filter_exclude(layout.all_specs(), exclude):
        src = layout.path_for_spec(spec)
        tgt = spack.store.layout.path_for_spec(spec)
        if osp.exists(tgt):
            if not (nostack or hardlinks):
                if osp.islink(tgt):
                    os.remove(tgt)
                else:
                    tty.warn("Cannot not stack {0} because {1} exists.".format(
                        src, tgt))
                    continue
            else:
                tty.info("Not stacking {0} because already present.".format(
                    src))
        fs.mkdirp(osp.dirname(tgt))
        tty.debug("Linking {0} -> {1}".format(src, tgt))
        if not hardlinks:
            os.symlink(src, tgt)
        else:
            os.link(src, tgt)
        num_packages += 1

    tty.info("Added {0} packages from {1}".format(num_packages, remote))

    return num_packages
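A short, hypothetical usage sketch for the helper above; the remote path and the exclude list are illustrative only and assume the function is importable in the current module:

import llnl.util.tty as tty

# Symlink every package installed in another Spack tree into the local
# store, skipping anything that is already present instead of re-linking it.
linked = add_remote_packages('/opt/shared/remote-spack',  # illustrative path
                             exclude=['python'],          # illustrative filter
                             nostack=True)
tty.msg('{0} packages were linked'.format(linked))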
Example 3
def test_internal_config_update(config, write_config_file):
    write_config_file('config', config_low, 'low')

    before = config.get('config')
    assert before['install_tree'] == 'install_tree_path'

    # add an internal configuration scope
    scope = spack.config.InternalConfigScope('command_line')
    assert 'InternalConfigScope' in repr(scope)

    config.push_scope(scope)

    command_config = config.get('config', scope='command_line')
    command_config['install_tree'] = 'foo/bar'

    config.set('config', command_config, scope='command_line')

    after = config.get('config')
    assert after['install_tree'] == 'foo/bar'
Example 4
File: ci.py Project: rexcsn/spack
def test_configure_compilers(mutable_config):
    def assert_missing(config):
        assert ('install_missing_compilers' not in config
                or config['install_missing_compilers'] is False)

    def assert_present(config):
        assert ('install_missing_compilers' in config
                and config['install_missing_compilers'] is True)

    original_config = cfg.get('config')
    assert_missing(original_config)

    ci.configure_compilers('FIND_ANY', scope='site')

    second_config = cfg.get('config')
    assert_missing(second_config)

    ci.configure_compilers('INSTALL_MISSING')
    last_config = cfg.get('config')
    assert_present(last_config)
Example 5
def configure_compilers(compiler_action, scope=None):
    if compiler_action == 'INSTALL_MISSING':
        tty.debug('Make sure bootstrapped compiler will be installed')
        config = cfg.get('config')
        config['install_missing_compilers'] = True
        cfg.set('config', config)
    elif compiler_action == 'FIND_ANY':
        tty.debug('Just find any available compiler')
        find_args = ['find']
        if scope:
            find_args.extend(['--scope', scope])
        output = spack_compiler(*find_args)
        tty.debug('spack compiler find')
        tty.debug(output)
        output = spack_compiler('list')
        tty.debug('spack compiler list')
        tty.debug(output)
    else:
        tty.debug('No compiler action to be taken')

    return None
Example 6
def remove_pr_mirror():
    cfg_scope = cfg.default_modify_scope()
    mirrors = cfg.get('mirrors', scope=cfg_scope)
    mirrors.pop('ci_pr_mirror')
    cfg.set('mirrors', mirrors, scope=cfg_scope)
Example 7
def ci_rebuild(args):
    """Check a single spec against the remote mirror, and rebuild it from
       source if the mirror does not contain the full hash match of the spec
       as computed locally. """
    env = ev.get_env(args, 'ci rebuild', required=True)

    # Make sure the environment is "gitlab-enabled", or else there's nothing
    # to do.
    yaml_root = ev.config_dict(env.yaml)
    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    # Grab the environment variables we need.  These either come from the
    # pipeline generation step ("spack ci generate"), where they were written
    # out as variables, or else provided by GitLab itself.
    pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
    job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
    repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
    local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
    concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
    ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
    ci_job_name = get_env_var('CI_JOB_NAME')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
    pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
    remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')

    # Construct absolute paths relative to current $CI_PROJECT_DIR
    ci_project_dir = get_env_var('CI_PROJECT_DIR')
    pipeline_artifacts_dir = os.path.join(ci_project_dir,
                                          pipeline_artifacts_dir)
    job_log_dir = os.path.join(ci_project_dir, job_log_dir)
    repro_dir = os.path.join(ci_project_dir, repro_dir)
    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
    concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

    # Debug print some of the key environment variables we should have received
    tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    # Query the environment manifest to find out whether we're reporting to a
    # CDash instance, and if so, gather some information from the manifest to
    # support that task.
    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    # Is this a pipeline run on a spack PR or a merge to develop?  It might
    # be neither, e.g. a pipeline run on some environment repository.
    spack_is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
    spack_is_develop_pipeline = spack_pipeline_type == 'spack_protected_branch'

    tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
        spack_is_pr_pipeline, spack_is_develop_pipeline))

    # Figure out what our temporary storage mirror is: the artifacts
    # buildcache, or temporary-storage-url-prefix?  In some cases we need to
    # force one of them, or pipelines might not have a way to propagate build
    # artifacts from upstream to downstream jobs.
    pipeline_mirror_url = None

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
        pipeline_mirror_url = url_util.join(temp_storage_url_prefix,
                                            ci_pipeline_id)

    enable_artifacts_mirror = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']
        if (enable_artifacts_mirror
                or (spack_is_pr_pipeline and not enable_artifacts_mirror
                    and not temp_storage_url_prefix)):
            # If you explicitly enabled the artifacts buildcache feature, or
            # if this is a PR pipeline but you did not enable either of the
            # per-pipeline temporary storage features, we force the use of
            # artifacts buildcache.  Otherwise jobs will not have binary
            # dependencies from previous stages available since we do not
            # allow pushing binaries to the remote mirror during PR pipelines.
            enable_artifacts_mirror = True
            pipeline_mirror_url = 'file://' + local_mirror_dir
            mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
                pipeline_mirror_url)
            tty.debug(mirror_msg)

    # Whatever form of root_spec we got, use it to get a map giving us concrete
    # specs for this job and all of its dependencies.
    spec_map = spack_ci.get_concrete_specs(env, root_spec, job_spec_pkg_name,
                                           related_builds, compiler_action)
    job_spec = spec_map[job_spec_pkg_name]

    job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
    job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)

    # To provide logs, cdash reports, etc for developer download/perusal,
    # these things have to be put into artifacts.  This means downstream
    # jobs that "need" this job will get those artifacts too.  So here we
    # need to clean out the artifacts we may have got from upstream jobs.

    cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    if os.path.exists(job_log_dir):
        shutil.rmtree(job_log_dir)

    if os.path.exists(repro_dir):
        shutil.rmtree(repro_dir)

    # Now that we removed them if they existed, create the directories we
    # need for storing artifacts.  The cdash_report directory will be
    # created internally if needed.
    os.makedirs(job_log_dir)
    os.makedirs(repro_dir)

    # Copy the concrete environment files to the repro directory so we can
    # expose them as artifacts and not conflict with the concrete environment
    # files we got as artifacts from the upstream pipeline generation job.
    # Try to cast a slightly wider net too, and hopefully get the generated
    # pipeline yaml.  If we miss it, the user will still be able to go to the
    # pipeline generation job and get it from there.
    target_dirs = [concrete_env_dir, pipeline_artifacts_dir]

    for dir_to_list in target_dirs:
        for file_name in os.listdir(dir_to_list):
            src_file = os.path.join(dir_to_list, file_name)
            if os.path.isfile(src_file):
                dst_file = os.path.join(repro_dir, file_name)
                shutil.copyfile(src_file, dst_file)

    # If signing key was provided via "SPACK_SIGNING_KEY", then try to
    # import it.
    if signing_key:
        spack_ci.import_signing_key(signing_key)

    # Depending on the specifics of this job, we might need to turn on the
    # "config:install_missing compilers" option (to build this job spec
    # with a bootstrapped compiler), or possibly run "spack compiler find"
    # (to build a bootstrap compiler or one of its deps in a
    # compiler-agnostic way), or maybe do nothing at all (to build a spec
    # using a compiler already installed on the target system).
    spack_ci.configure_compilers(compiler_action)

    # Write this job's spec yaml into the reproduction directory, and it will
    # also be used in the generated "spack install" command to install the spec
    tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
    with open(job_spec_yaml_path, 'w') as fd:
        fd.write(job_spec.to_yaml(hash=ht.build_hash))

    # Write the concrete root spec yaml into the reproduction directory
    root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
    with open(root_spec_yaml_path, 'w') as fd:
        fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

    # Write some other details to aid in reproduction into an artifact
    repro_file = os.path.join(repro_dir, 'repro.json')
    repro_details = {
        'job_name': ci_job_name,
        'job_spec_yaml': job_spec_yaml_file,
        'root_spec_yaml': 'root.yaml',
        'ci_project_dir': ci_project_dir
    }
    with open(repro_file, 'w') as fd:
        fd.write(json.dumps(repro_details))

    # Write information about spack into an artifact in the repro dir
    spack_info = spack_ci.get_spack_info()
    spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
    with open(spack_info_file, 'w') as fd:
        fd.write('\n{0}\n'.format(spack_info))

    # If we decided there should be a temporary storage mechanism, add that
    # mirror now so it's used when we check for a full hash match already
    # built for this spec.
    if pipeline_mirror_url:
        spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
                         pipeline_mirror_url, cfg.default_modify_scope())

    cdash_build_id = None
    cdash_build_stamp = None

    # Check configured mirrors for a built spec with a matching full hash
    matches = bindist.get_mirrors_for_spec(job_spec,
                                           full_hash_match=True,
                                           index_only=False)

    if matches:
        # Got a full hash match on at least one configured mirror.  All
        # matches represent the fully up-to-date spec, so should all be
        # equivalent.  If artifacts mirror is enabled, we just pick one
        # of the matches and download the buildcache files from there to
        # the artifacts, so they're available to be used by dependent
        # jobs in subsequent stages.
        tty.msg('No need to rebuild {0}, found full hash match at: '.format(
            job_spec_pkg_name))
        for match in matches:
            tty.msg('    {0}'.format(match['mirror_url']))
        if enable_artifacts_mirror:
            matching_mirror = matches[0]['mirror_url']
            build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
            tty.debug('Getting {0} buildcache from {1}'.format(
                job_spec_pkg_name, matching_mirror))
            tty.debug('Downloading to {0}'.format(build_cache_dir))
            buildcache.download_buildcache_files(job_spec, build_cache_dir,
                                                 False, matching_mirror)

        # Now we are done and successful
        sys.exit(0)

    # No full hash match anywhere means we need to rebuild spec

    # Start with spack arguments
    install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]

    config = cfg.get('config')
    if not config['verify_ssl']:
        install_args.append('-k')

    install_args.extend([
        'install',
        '--keep-stage',
        '--require-full-hash-match',
    ])

    can_verify = spack_ci.can_verify_binaries()
    verify_binaries = can_verify and spack_is_pr_pipeline is False
    if not verify_binaries:
        install_args.append('--no-check-signature')

    # If CDash reporting is enabled, we first register this build with
    # the specified CDash instance, then relate the build to those of
    # its dependencies.
    if enable_cdash:
        tty.debug('CDash: Registering build')
        (cdash_build_id, cdash_build_stamp) = spack_ci.register_cdash_build(
            cdash_build_name, cdash_base_url, cdash_project, cdash_site,
            job_spec_buildgroup)

        if cdash_build_id is not None:
            cdash_upload_url = '{0}/submit.php?project={1}'.format(
                cdash_base_url, cdash_project_enc)

            install_args.extend([
                '--cdash-upload-url',
                cdash_upload_url,
                '--cdash-build',
                cdash_build_name,
                '--cdash-site',
                cdash_site,
                '--cdash-buildstamp',
                cdash_build_stamp,
            ])

            tty.debug('CDash: Relating build with dependency builds')
            spack_ci.relate_cdash_builds(
                spec_map, cdash_base_url, cdash_build_id, cdash_project,
                [pipeline_mirror_url, pr_mirror_url, remote_mirror_url])

    # A compiler action of 'FIND_ANY' means we are building a bootstrap
    # compiler or one of its deps.
    # TODO: when compilers are dependencies, we should include --no-add
    if compiler_action != 'FIND_ANY':
        install_args.append('--no-add')

    # TODO: once we have the concrete spec registry, use the DAG hash
    # to identify the spec to install, rather than the concrete spec
    # yaml file.
    install_args.extend(['-f', job_spec_yaml_path])

    tty.debug('Installing {0} from source'.format(job_spec.name))
    tty.debug('spack install arguments: {0}'.format(install_args))

    # Write the install command to a shell script
    with open('install.sh', 'w') as fd:
        fd.write('#!/bin/bash\n\n')
        fd.write('\n# spack install command\n')
        fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
        fd.write('\n')

    st = os.stat('install.sh')
    os.chmod('install.sh', st.st_mode | stat.S_IEXEC)

    install_copy_path = os.path.join(repro_dir, 'install.sh')
    shutil.copyfile('install.sh', install_copy_path)

    # Run the generated install.sh shell script as if it were being run in
    # a login shell.
    try:
        install_process = subprocess.Popen(['bash', '-l', './install.sh'])
        install_process.wait()
        install_exit_code = install_process.returncode
    except (ValueError, subprocess.CalledProcessError, OSError) as inst:
        tty.error('Encountered error running install script')
        tty.error(inst)
        # Treat a failure to launch the script itself as a failed install,
        # so install_exit_code is always defined below.
        install_exit_code = 1

    # Now do the post-install tasks
    tty.debug('spack install exited {0}'.format(install_exit_code))

    # If a spec fails to build in a spack develop pipeline, we add it to a
    # list of known broken full hashes.  This allows spack PR pipelines to
    # avoid wasting compute cycles attempting to build those hashes.
    if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
        tty.debug('Install failed on develop')
        if 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            dev_fail_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
            tty.msg('Reporting broken develop build as: {0}'.format(
                broken_spec_path))
            tmpdir = tempfile.mkdtemp()
            empty_file_path = os.path.join(tmpdir, 'empty.txt')

            broken_spec_details = {
                'broken-spec': {
                    'job-url': get_env_var('CI_JOB_URL'),
                    'pipeline-url': get_env_var('CI_PIPELINE_URL'),
                    'concrete-spec-yaml': job_spec.to_dict(hash=ht.full_hash)
                }
            }

            try:
                with open(empty_file_path, 'w') as efd:
                    efd.write(syaml.dump(broken_spec_details))
                web_util.push_to_url(empty_file_path,
                                     broken_spec_path,
                                     keep_original=False,
                                     extra_args={'ContentType': 'text/plain'})
            except Exception as err:
                # If we got some kind of S3 (access denied or other connection
                # error), the first non boto-specific class in the exception
                # hierarchy is Exception.  Just print a warning and return
                msg = 'Error writing to broken specs list {0}: {1}'.format(
                    broken_spec_path, err)
                tty.warn(msg)
            finally:
                shutil.rmtree(tmpdir)

    # We generated the "spack install ..." command with "--keep-stage", so
    # copy any logs from the staging directory to artifacts now
    spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

    # Create buildcache on remote mirror, either on pr-specific mirror or
    # on the main mirror defined in the gitlab-enabled spack environment
    if spack_is_pr_pipeline:
        buildcache_mirror_url = pr_mirror_url
    else:
        buildcache_mirror_url = remote_mirror_url

    # If the install succeeded, create a buildcache entry for this job spec
    # and push it to one or more mirrors.  If the install did not succeed,
    # print out some instructions on how to reproduce this build failure
    # outside of the pipeline environment.
    if install_exit_code == 0:
        can_sign = spack_ci.can_sign_binaries()
        sign_binaries = can_sign and spack_is_pr_pipeline is False

        # Create buildcache in either the main remote mirror, or in the
        # per-PR mirror, if this is a PR pipeline
        if buildcache_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          buildcache_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, buildcache_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 buildcache_mirror_url)

        # Create another copy of that buildcache in the per-pipeline
        # temporary storage mirror (this is only done if either
        # artifacts buildcache is enabled or a temporary storage url
        # prefix is set)
        if pipeline_mirror_url:
            spack_ci.push_mirror_contents(env, job_spec, job_spec_yaml_path,
                                          pipeline_mirror_url, sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, pipeline_mirror_url))
                spack_ci.write_cdashid_to_mirror(cdash_build_id, job_spec,
                                                 pipeline_mirror_url)

        # If this is a develop pipeline, check if the spec that we just built is
        # on the broken-specs list. If so, remove it.
        if spack_is_develop_pipeline and 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            just_built_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
            if web_util.url_exists(broken_spec_path):
                tty.msg('Removing {0} from the list of broken specs'.format(
                    broken_spec_path))
                try:
                    web_util.remove_url(broken_spec_path)
                except Exception as err:
                    # If we got some kind of S3 (access denied or other connection
                    # error), the first non boto-specific class in the exception
                    # hierarchy is Exception.  Just print a warning and return
                    msg = 'Error removing {0} from broken specs list: {1}'.format(
                        broken_spec_path, err)
                    tty.warn(msg)

    else:
        tty.debug('spack install exited non-zero, will not create buildcache')

        api_root_url = get_env_var('CI_API_V4_URL')
        ci_project_id = get_env_var('CI_PROJECT_ID')
        ci_job_id = get_env_var('CI_JOB_ID')

        repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
            api_root_url, ci_project_id, ci_job_id)

        # ANSI escape sequences cause this to be printed in blue so it stands out
        reproduce_msg = """

\033[34mTo reproduce this build locally, run:

    spack ci reproduce-build {0} [--working-dir <dir>]

If this project does not have public pipelines, you will need to first:

    export GITLAB_PRIVATE_TOKEN=<generated_token>

... then follow the printed instructions.\033[0;0m

""".format(repro_job_url)

        print(reproduce_msg)

    # Tie job success/failure to the success/failure of building the spec
    return install_exit_code
Example 8
def test_internal_config_from_data():
    config = spack.config.Configuration()

    # add an internal config initialized from an inline dict
    config.push_scope(
        spack.config.InternalConfigScope(
            '_builtin', {'config': {
                'verify_ssl': False,
                'build_jobs': 6,
            }}))

    assert config.get('config:verify_ssl', scope='_builtin') is False
    assert config.get('config:build_jobs', scope='_builtin') == 6

    assert config.get('config:verify_ssl') is False
    assert config.get('config:build_jobs') == 6

    # push one on top and see what happens.
    config.push_scope(
        spack.config.InternalConfigScope(
            'higher', {'config': {
                'checksum': True,
                'verify_ssl': True,
            }}))

    assert config.get('config:verify_ssl', scope='_builtin') is False
    assert config.get('config:build_jobs', scope='_builtin') == 6

    assert config.get('config:verify_ssl', scope='higher') is True
    assert config.get('config:build_jobs', scope='higher') is None

    assert config.get('config:verify_ssl') is True
    assert config.get('config:build_jobs') == 6
    assert config.get('config:checksum') is True

    assert config.get('config:checksum', scope='_builtin') is None
    assert config.get('config:checksum', scope='higher') is True
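As a small follow-on sketch (assuming the same Configuration object as above, and that pop_scope is available on it, as elsewhere in Spack's configuration code): popping the top scope makes the lower scope's values visible again.

config.pop_scope()                               # drop the 'higher' scope
assert config.get('config:verify_ssl') is False  # '_builtin' value shows through again
assert config.get('config:checksum') is None     # 'checksum' only existed in 'higher'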
Example 9
import os

import spack
import spack.config
from spack.util.path import canonicalize_path
from spack.database import Database
from spack.directory_layout import YamlDirectoryLayout
from spack.directory_layout import YamlExtensionsLayout

__author__ = "Benedikt Hegner (CERN)"
__all__ = ['db', 'extensions', 'layout', 'root']

#
# Read in the config
#
config = spack.config.get_config("config")

#
# Set up the install path
#
root = canonicalize_path(
    config.get('install_tree', os.path.join(spack.opt_path, 'spack')))

#
# Set up the installed packages database
#
db = Database(root)

#
# This controls how spack lays out install prefixes and
# stage directories.
#
layout = YamlDirectoryLayout(root,
                             hash_len=config.get('install_hash_length'),
                             path_scheme=config.get('install_path_scheme'))

extensions = YamlExtensionsLayout(root, layout)
Example 10
def extract_tarball(spec,
                    filename,
                    allow_root=False,
                    unsigned=False,
                    force=False):
    """
    extract binary tarball for given package into install area
    """
    if os.path.exists(spec.prefix):
        if force:
            shutil.rmtree(spec.prefix)
        else:
            raise NoOverwriteException(str(spec.prefix))

    tmpdir = tempfile.mkdtemp()
    stagepath = os.path.dirname(filename)
    spackfile_name = tarball_name(spec, '.spack')
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)

    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
                suppress = config.get('config:suppress_gpg_warnings', False)
                Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
            except Exception as e:
                shutil.rmtree(tmpdir)
                tty.die(e)
        else:
            shutil.rmtree(tmpdir)
            raise NoVerifyException(
                "Package spec file failed signature verification.\n"
                "Use spack buildcache keys to download "
                "and install a key for verification from the mirror.")
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # get the sha256 checksum recorded at creation
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']

    # if the checksums don't match don't install
    if bchecksum['hash'] != checksum:
        shutil.rmtree(tmpdir)
        raise NoChecksumException(
            "Package tarball failed checksum verification.\n"
            "It cannot be installed.")

    new_relative_prefix = str(
        os.path.relpath(spec.prefix, spack.store.layout.root))
    # if the original relative prefix is in the spec file use it
    buildinfo = spec_dict.get('buildinfo', {})
    old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
    # if the original relative prefix and new relative prefix differ, the
    # directory layout has changed and the buildcache cannot be installed
    if old_relative_prefix != new_relative_prefix:
        shutil.rmtree(tmpdir)
        msg = "Package tarball was created from an install "
        msg += "prefix with a different directory layout.\n"
        msg += "It cannot be relocated."
        raise NewLayoutException(msg)

    # extract the tarball in a temp directory
    with closing(tarfile.open(tarfile_path, 'r')) as tar:
        tar.extractall(path=tmpdir)
    # the base of the install prefix is used when creating the tarball
    # so the pathname should be the same now that the directory layout
    # is confirmed
    workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
    install_tree(workdir, spec.prefix, symlinks=True)

    # cleanup
    os.remove(tarfile_path)
    os.remove(specfile_path)

    try:
        relocate_package(spec.prefix, spec, allow_root)
    except Exception as e:
        shutil.rmtree(spec.prefix)
        tty.die(e)
    else:
        manifest_file = os.path.join(spec.prefix,
                                     spack.store.layout.metadata_dir,
                                     spack.store.layout.manifest_file_name)
        if not os.path.exists(manifest_file):
            spec_id = spec.format('{name}/{hash:7}')
            tty.warn('No manifest file in tarball for spec %s' % spec_id)
    finally:
        shutil.rmtree(tmpdir)
Example 11
import os

import spack
import spack.config
from spack.util.path import canonicalize_path
from spack.database import Database
from spack.directory_layout import YamlDirectoryLayout

__author__ = "Benedikt Hegner (CERN)"
__all__ = ['db', 'layout', 'root']

#
# Read in the config
#
config = spack.config.get_config("config")

#
# Set up the install path
#
root = canonicalize_path(
    config.get('install_tree', os.path.join(spack.opt_path, 'spack')))

#
# Set up the installed packages database
#
db = Database(root)

#
# This controls how spack lays out install prefixes and
# stage directories.
#
layout = YamlDirectoryLayout(root,
                             hash_len=config.get('install_hash_length'),
                             path_scheme=config.get('install_path_scheme'))
Example 12
import os

import spack
import spack.config
from spack.util.path import canonicalize_path
from spack.database import Database
from spack.directory_layout import YamlDirectoryLayout

__author__ = "Benedikt Hegner (CERN)"
__all__ = ['db', 'layout', 'root']

#
# Read in the config
#
config = spack.config.get_config("config")

#
# Set up the install path
#
root = canonicalize_path(
    config.get('install_tree', os.path.join(spack.opt_path, 'spack')))

#
# Set up the installed packages database
#
db = Database(root)

#
# This controls how spack lays out install prefixes and
# stage directories.
#
layout = YamlDirectoryLayout(root)
Example 13
def add_pr_mirror(url):
    cfg_scope = cfg.default_modify_scope()
    mirrors = cfg.get('mirrors', scope=cfg_scope)
    items = [(n, u) for n, u in mirrors.items()]
    items.insert(0, ('ci_pr_mirror', url))
    cfg.set('mirrors', syaml.syaml_dict(items), scope=cfg_scope)
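A brief sketch of how this helper pairs with remove_pr_mirror() shown in an earlier example; the mirror URL is purely illustrative:

pr_mirror_url = 's3://example-bucket/pr-binary-mirror'  # hypothetical URL

# Register the per-PR mirror for the duration of the pipeline run ...
add_pr_mirror(pr_mirror_url)
try:
    pass  # ... generate and run CI jobs that read from / push to it ...
finally:
    # ... and always unregister it so it does not leak into later runs.
    remove_pr_mirror()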
Example 14
File: config.py Project: LLNL/spack
def test_internal_config_from_data():
    config = spack.config.Configuration()

    # add an internal config initialized from an inline dict
    config.push_scope(spack.config.InternalConfigScope('_builtin', {
        'config': {
            'verify_ssl': False,
            'build_jobs': 6,
        }
    }))

    assert config.get('config:verify_ssl', scope='_builtin') is False
    assert config.get('config:build_jobs', scope='_builtin') == 6

    assert config.get('config:verify_ssl') is False
    assert config.get('config:build_jobs') == 6

    # push one on top and see what happens.
    config.push_scope(spack.config.InternalConfigScope('higher', {
        'config': {
            'checksum': True,
            'verify_ssl': True,
        }
    }))

    assert config.get('config:verify_ssl', scope='_builtin') is False
    assert config.get('config:build_jobs', scope='_builtin') == 6

    assert config.get('config:verify_ssl', scope='higher') is True
    assert config.get('config:build_jobs', scope='higher') is None

    assert config.get('config:verify_ssl') is True
    assert config.get('config:build_jobs') == 6
    assert config.get('config:checksum') is True

    assert config.get('config:checksum', scope='_builtin') is None
    assert config.get('config:checksum', scope='higher') is True
Example 15
def extract_tarball(spec,
                    filename,
                    allow_root=False,
                    unsigned=False,
                    force=False):
    """
    extract binary tarball for given package into install area
    """
    if os.path.exists(spec.prefix):
        if force:
            shutil.rmtree(spec.prefix)
        else:
            raise NoOverwriteException(str(spec.prefix))

    tmpdir = tempfile.mkdtemp()
    stagepath = os.path.dirname(filename)
    spackfile_name = tarball_name(spec, '.spack')
    spackfile_path = os.path.join(stagepath, spackfile_name)
    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_path = os.path.join(tmpdir, tarfile_name)
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.join(tmpdir, specfile_name)

    with closing(tarfile.open(spackfile_path, 'r')) as tar:
        tar.extractall(tmpdir)
    # some buildcache tarfiles use bzip2 compression
    if not os.path.exists(tarfile_path):
        tarfile_name = tarball_name(spec, '.tar.bz2')
        tarfile_path = os.path.join(tmpdir, tarfile_name)
    if not unsigned:
        if os.path.exists('%s.asc' % specfile_path):
            try:
                suppress = config.get('config:suppress_gpg_warnings', False)
                Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
            except Exception as e:
                shutil.rmtree(tmpdir)
                raise e
        else:
            shutil.rmtree(tmpdir)
            raise NoVerifyException(
                "Package spec file failed signature verification.\n"
                "Use spack buildcache keys to download "
                "and install a key for verification from the mirror.")
    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # get the sha256 checksum recorded at creation
    spec_dict = {}
    with open(specfile_path, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = syaml.load(content)
    bchecksum = spec_dict['binary_cache_checksum']

    # if the checksums don't match don't install
    if bchecksum['hash'] != checksum:
        shutil.rmtree(tmpdir)
        raise NoChecksumException(
            "Package tarball failed checksum verification.\n"
            "It cannot be installed.")

    new_relative_prefix = str(
        os.path.relpath(spec.prefix, spack.store.layout.root))
    # if the original relative prefix is in the spec file use it
    buildinfo = spec_dict.get('buildinfo', {})
    old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
    rel = buildinfo.get('relative_rpaths')
    # if the original relative prefix and new relative prefix differ, the
    # directory layout has changed and the buildcache cannot be installed
    # if it was created with relative rpaths
    info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
    tty.debug(info % (old_relative_prefix, new_relative_prefix, rel))
    #    if (old_relative_prefix != new_relative_prefix and (rel)):
    #        shutil.rmtree(tmpdir)
    #        msg = "Package tarball was created from an install "
    #        msg += "prefix with a different directory layout. "
    #        msg += "It cannot be relocated because it "
    #        msg += "uses relative rpaths."
    #        raise NewLayoutException(msg)

    # extract the tarball in a temp directory
    with closing(tarfile.open(tarfile_path, 'r')) as tar:
        tar.extractall(path=tmpdir)
    # get the parent directory of the file .spack/binary_distribution
    # this should be the directory unpacked from the tarball whose
    # name is unknown because the prefix naming is unknown
    bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
    workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
    tty.debug('workdir %s' % workdir)
    # install_tree copies hardlinks
    # create a temporary tarfile from workdir and extract it into the
    # install prefix instead; tarfile preserves hardlinks
    temp_tarfile_name = tarball_name(spec, '.tar')
    temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
    with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
        tar.add(name='%s' % workdir, arcname='.')
    with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
        tar.extractall(spec.prefix)
    os.remove(temp_tarfile_path)

    # cleanup
    os.remove(tarfile_path)
    os.remove(specfile_path)

    try:
        relocate_package(spec, allow_root)
    except Exception as e:
        shutil.rmtree(spec.prefix)
        raise e
    else:
        manifest_file = os.path.join(spec.prefix,
                                     spack.store.layout.metadata_dir,
                                     spack.store.layout.manifest_file_name)
        if not os.path.exists(manifest_file):
            spec_id = spec.format('{name}/{hash:7}')
            tty.warn('No manifest file in tarball for spec %s' % spec_id)
    finally:
        shutil.rmtree(tmpdir)
        if os.path.exists(filename):
            os.remove(filename)