def generate_package_index(cache_prefix):
    """Create the build cache index page.

    Creates (or replaces) the "index.html" page at the location given in
    cache_prefix.  This page contains a link for each binary package (*.yaml)
    and signing key (*.key) under cache_prefix.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        index_html_path = os.path.join(tmpdir, 'index.html')
        file_list = (
            entry
            for entry in web_util.list_url(cache_prefix)
            if (entry.endswith('.yaml') or entry.endswith('.key')))

        with open(index_html_path, 'w') as f:
            f.write(BUILD_CACHE_INDEX_TEMPLATE.format(
                title='Spack Package Index',
                path_list='\n'.join(
                    BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path)
                    for path in file_list)))

        web_util.push_to_url(
            index_html_path,
            url_util.join(cache_prefix, 'index.html'),
            keep_original=False,
            extra_args={'ContentType': 'text/html'})
    finally:
        shutil.rmtree(tmpdir)

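# The two templates referenced above are not included in this excerpt.  A
# minimal sketch of what they could look like follows; the placeholder names
# ({title}, {path_list}, {path}) come from the format() calls in
# generate_package_index, but the exact markup is an assumption.
BUILD_CACHE_INDEX_TEMPLATE = """<html>
<head><title>{title}</title></head>
<body>
<ul>
{path_list}
</ul>
</body>
</html>
"""

BUILD_CACHE_INDEX_ENTRY_TEMPLATE = '<li><a href="{path}">{path}</a></li>'
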
def generate_key_index(key_prefix, tmpdir=None):
    """Create the key index page.

    Creates (or replaces) the "index.json" page at the location given in
    key_prefix.  This page contains an entry for each key (.pub) under
    key_prefix.
    """
    tty.debug(' '.join(('Retrieving key.pub files from',
                        url_util.format(key_prefix),
                        'to build key index')))

    fingerprints = (
        entry[:-4]
        for entry in web_util.list_url(key_prefix, recursive=False)
        if entry.endswith('.pub'))

    keys_local = url_util.local_file_path(key_prefix)
    if keys_local:
        target = os.path.join(keys_local, 'index.json')
    else:
        target = os.path.join(tmpdir, 'index.json')

    index = {
        'keys': dict(
            (fingerprint, {}) for fingerprint
            in sorted(set(fingerprints)))
    }
    with open(target, 'w') as f:
        sjson.dump(index, f)

    if not keys_local:
        web_util.push_to_url(
            target,
            url_util.join(key_prefix, 'index.json'),
            keep_original=False,
            extra_args={'ContentType': 'application/json'})

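# The key index produced above is a JSON document of the form
# {"keys": {"<fingerprint>": {}, ...}}, where each fingerprint is a .pub file
# name with its extension stripped.  Below is a hypothetical usage sketch; the
# mirror URL is made up, and for a non-local prefix a staging tmpdir must be
# supplied because the index is written there before being pushed.
def _example_generate_key_index():
    staging = tempfile.mkdtemp()
    try:
        generate_key_index('s3://my-mirror/build_cache/_pgp', tmpdir=staging)
    finally:
        shutil.rmtree(staging)
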
def archive(self, destination):
    """Just moves this archive to the destination."""
    if not self.archive_file:
        raise NoArchiveFileError("Cannot call archive() before fetching.")

    web_util.push_to_url(self.archive_file, destination, keep_original=True)

def generate_package_index(cache_prefix):
    """Create the build cache index page.

    Creates (or replaces) the "index.json" page at the location given in
    cache_prefix.  This page contains a database entry for each binary
    package (*.yaml) under cache_prefix.
    """
    tmpdir = tempfile.mkdtemp()
    db_root_dir = os.path.join(tmpdir, 'db_root')
    db = spack_db.Database(None, db_dir=db_root_dir,
                           enable_transaction_locking=False,
                           record_fields=['spec', 'ref_count'])

    file_list = (
        entry
        for entry in web_util.list_url(cache_prefix)
        if entry.endswith('.yaml'))

    tty.debug('Retrieving spec.yaml files from {0} to build index'.format(
        cache_prefix))

    for file_path in file_list:
        try:
            yaml_url = url_util.join(cache_prefix, file_path)
            tty.debug('fetching {0}'.format(yaml_url))
            _, _, yaml_file = web_util.read_from_url(yaml_url)
            yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
            # yaml_obj = syaml.load(yaml_contents)
            # s = Spec.from_yaml(yaml_obj)
            s = Spec.from_yaml(yaml_contents)
            db.add(s, None)
        except (URLError, web_util.SpackWebError) as url_err:
            tty.error('Error reading spec.yaml: {0}'.format(file_path))
            tty.error(url_err)

    try:
        index_json_path = os.path.join(db_root_dir, 'index.json')
        with open(index_json_path, 'w') as f:
            db._write_to_file(f)

        web_util.push_to_url(
            index_json_path,
            url_util.join(cache_prefix, 'index.json'),
            keep_original=False,
            extra_args={'ContentType': 'application/json'})
    finally:
        shutil.rmtree(tmpdir)

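# Hypothetical usage sketch for the database-backed index above (the mirror
# URL and the 'build_cache' prefix are assumptions): after new .spec.yaml
# files are pushed to a mirror, regenerating index.json lets clients discover
# the new entries without listing every spec file themselves.
def _example_regenerate_package_index():
    cache_prefix = url_util.join('file:///tmp/my-mirror', 'build_cache')
    generate_package_index(cache_prefix)
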
def write_cdashid_to_mirror(cdashid, spec, mirror_url):
    if not spec.concrete:
        tty.die('Can only write cdashid for concrete spec to mirror')

    with TemporaryDirectory() as tmpdir:
        local_cdash_path = os.path.join(tmpdir, 'job.cdashid')
        with open(local_cdash_path, 'w') as fd:
            fd.write(cdashid)

        buildcache_name = bindist.tarball_name(spec, '')
        cdashid_file_name = '{0}.cdashid'.format(buildcache_name)
        remote_url = os.path.join(
            mirror_url, bindist.build_cache_relative_path(),
            cdashid_file_name)

        tty.debug('pushing cdashid to url')
        tty.debug('  local file path: {0}'.format(local_cdash_path))
        tty.debug('  remote url: {0}'.format(remote_url))

        web_util.push_to_url(local_cdash_path, remote_url)

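# Hypothetical usage sketch (the cdashid value and mirror URL are made up):
# the id ends up in a <tarball_name>.cdashid file next to the spec's other
# buildcache files under the mirror's build cache directory.
def _example_write_cdashid(concrete_spec):
    write_cdashid_to_mirror('12345', concrete_spec, 's3://my-mirror')
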
def ci_rebuild(args):
    """Check a single spec against the remote mirror, and rebuild it from
       source if the mirror does not contain the full hash match of the spec
       as computed locally."""
    env = ev.get_env(args, 'ci rebuild', required=True)

    # Make sure the environment is "gitlab-enabled", or else there's nothing
    # to do.
    yaml_root = ev.config_dict(env.yaml)
    gitlab_ci = None
    if 'gitlab-ci' in yaml_root:
        gitlab_ci = yaml_root['gitlab-ci']

    if not gitlab_ci:
        tty.die('spack ci rebuild requires an env containing gitlab-ci cfg')

    # Grab the environment variables we need.  These either come from the
    # pipeline generation step ("spack ci generate"), where they were written
    # out as variables, or else provided by GitLab itself.
    pipeline_artifacts_dir = get_env_var('SPACK_ARTIFACTS_ROOT')
    job_log_dir = get_env_var('SPACK_JOB_LOG_DIR')
    repro_dir = get_env_var('SPACK_JOB_REPRO_DIR')
    local_mirror_dir = get_env_var('SPACK_LOCAL_MIRROR_DIR')
    concrete_env_dir = get_env_var('SPACK_CONCRETE_ENV_DIR')
    ci_pipeline_id = get_env_var('CI_PIPELINE_ID')
    ci_job_name = get_env_var('CI_JOB_NAME')
    signing_key = get_env_var('SPACK_SIGNING_KEY')
    root_spec = get_env_var('SPACK_ROOT_SPEC')
    job_spec_pkg_name = get_env_var('SPACK_JOB_SPEC_PKG_NAME')
    compiler_action = get_env_var('SPACK_COMPILER_ACTION')
    cdash_build_name = get_env_var('SPACK_CDASH_BUILD_NAME')
    related_builds = get_env_var('SPACK_RELATED_BUILDS_CDASH')
    spack_pipeline_type = get_env_var('SPACK_PIPELINE_TYPE')
    pr_mirror_url = get_env_var('SPACK_PR_MIRROR_URL')
    remote_mirror_url = get_env_var('SPACK_REMOTE_MIRROR_URL')

    # Construct absolute paths relative to current $CI_PROJECT_DIR
    ci_project_dir = get_env_var('CI_PROJECT_DIR')
    pipeline_artifacts_dir = os.path.join(
        ci_project_dir, pipeline_artifacts_dir)
    job_log_dir = os.path.join(ci_project_dir, job_log_dir)
    repro_dir = os.path.join(ci_project_dir, repro_dir)
    local_mirror_dir = os.path.join(ci_project_dir, local_mirror_dir)
    concrete_env_dir = os.path.join(ci_project_dir, concrete_env_dir)

    # Debug print some of the key environment variables we should have received
    tty.debug('pipeline_artifacts_dir = {0}'.format(pipeline_artifacts_dir))
    tty.debug('root_spec = {0}'.format(root_spec))
    tty.debug('remote_mirror_url = {0}'.format(remote_mirror_url))
    tty.debug('job_spec_pkg_name = {0}'.format(job_spec_pkg_name))
    tty.debug('compiler_action = {0}'.format(compiler_action))

    # Query the environment manifest to find out whether we're reporting to a
    # CDash instance, and if so, gather some information from the manifest to
    # support that task.
    enable_cdash = False
    if 'cdash' in yaml_root:
        enable_cdash = True
        ci_cdash = yaml_root['cdash']
        job_spec_buildgroup = ci_cdash['build-group']
        cdash_base_url = ci_cdash['url']
        cdash_project = ci_cdash['project']
        proj_enc = urlencode({'project': cdash_project})
        eq_idx = proj_enc.find('=') + 1
        cdash_project_enc = proj_enc[eq_idx:]
        cdash_site = ci_cdash['site']
        tty.debug('cdash_base_url = {0}'.format(cdash_base_url))
        tty.debug('cdash_project = {0}'.format(cdash_project))
        tty.debug('cdash_project_enc = {0}'.format(cdash_project_enc))
        tty.debug('cdash_build_name = {0}'.format(cdash_build_name))
        tty.debug('cdash_site = {0}'.format(cdash_site))
        tty.debug('related_builds = {0}'.format(related_builds))
        tty.debug('job_spec_buildgroup = {0}'.format(job_spec_buildgroup))

    # Is this a pipeline run on a spack PR or a merge to develop?  It might
    # be neither, e.g. a pipeline run on some environment repository.
    spack_is_pr_pipeline = spack_pipeline_type == 'spack_pull_request'
    spack_is_develop_pipeline = spack_pipeline_type == 'spack_protected_branch'

    tty.debug('Pipeline type - PR: {0}, develop: {1}'.format(
        spack_is_pr_pipeline, spack_is_develop_pipeline))

    # Figure out what is our temporary storage mirror: Is it artifacts
    # buildcache?  Or temporary-storage-url-prefix?  In some cases we need to
    # force something or pipelines might not have a way to propagate build
    # artifacts from upstream to downstream jobs.
    pipeline_mirror_url = None

    temp_storage_url_prefix = None
    if 'temporary-storage-url-prefix' in gitlab_ci:
        temp_storage_url_prefix = gitlab_ci['temporary-storage-url-prefix']
        pipeline_mirror_url = url_util.join(
            temp_storage_url_prefix, ci_pipeline_id)

    enable_artifacts_mirror = False
    if 'enable-artifacts-buildcache' in gitlab_ci:
        enable_artifacts_mirror = gitlab_ci['enable-artifacts-buildcache']

    if (enable_artifacts_mirror or (spack_is_pr_pipeline and
        not enable_artifacts_mirror and not temp_storage_url_prefix)):
        # If you explicitly enabled the artifacts buildcache feature, or
        # if this is a PR pipeline but you did not enable either of the
        # per-pipeline temporary storage features, we force the use of
        # artifacts buildcache.  Otherwise jobs will not have binary
        # dependencies from previous stages available since we do not
        # allow pushing binaries to the remote mirror during PR pipelines.
        enable_artifacts_mirror = True
        pipeline_mirror_url = 'file://' + local_mirror_dir
        mirror_msg = 'artifact buildcache enabled, mirror url: {0}'.format(
            pipeline_mirror_url)
        tty.debug(mirror_msg)

    # Whatever form of root_spec we got, use it to get a map giving us concrete
    # specs for this job and all of its dependencies.
    spec_map = spack_ci.get_concrete_specs(
        env, root_spec, job_spec_pkg_name, related_builds, compiler_action)
    job_spec = spec_map[job_spec_pkg_name]

    job_spec_yaml_file = '{0}.yaml'.format(job_spec_pkg_name)
    job_spec_yaml_path = os.path.join(repro_dir, job_spec_yaml_file)

    # To provide logs, cdash reports, etc for developer download/perusal,
    # these things have to be put into artifacts.  This means downstream
    # jobs that "need" this job will get those artifacts too.  So here we
    # need to clean out the artifacts we may have got from upstream jobs.
    cdash_report_dir = os.path.join(pipeline_artifacts_dir, 'cdash_report')
    if os.path.exists(cdash_report_dir):
        shutil.rmtree(cdash_report_dir)

    if os.path.exists(job_log_dir):
        shutil.rmtree(job_log_dir)

    if os.path.exists(repro_dir):
        shutil.rmtree(repro_dir)

    # Now that we removed them if they existed, create the directories we
    # need for storing artifacts.  The cdash_report directory will be
    # created internally if needed.
    os.makedirs(job_log_dir)
    os.makedirs(repro_dir)

    # Copy the concrete environment files to the repro directory so we can
    # expose them as artifacts and not conflict with the concrete environment
    # files we got as artifacts from the upstream pipeline generation job.
    # Try to cast a slightly wider net too, and hopefully get the generated
    # pipeline yaml.  If we miss it, the user will still be able to go to the
    # pipeline generation job and get it from there.
    target_dirs = [
        concrete_env_dir,
        pipeline_artifacts_dir
    ]

    for dir_to_list in target_dirs:
        for file_name in os.listdir(dir_to_list):
            src_file = os.path.join(dir_to_list, file_name)
            if os.path.isfile(src_file):
                dst_file = os.path.join(repro_dir, file_name)
                shutil.copyfile(src_file, dst_file)

    # If signing key was provided via "SPACK_SIGNING_KEY", then try to
    # import it.
    if signing_key:
        spack_ci.import_signing_key(signing_key)

    # Depending on the specifics of this job, we might need to turn on the
    # "config:install_missing_compilers" option (to build this job spec
    # with a bootstrapped compiler), or possibly run "spack compiler find"
    # (to build a bootstrap compiler or one of its deps in a
    # compiler-agnostic way), or maybe do nothing at all (to build a spec
    # using a compiler already installed on the target system).
    spack_ci.configure_compilers(compiler_action)

    # Write this job's spec yaml into the reproduction directory, and it will
    # also be used in the generated "spack install" command to install the spec
    tty.debug('job concrete spec path: {0}'.format(job_spec_yaml_path))
    with open(job_spec_yaml_path, 'w') as fd:
        fd.write(job_spec.to_yaml(hash=ht.build_hash))

    # Write the concrete root spec yaml into the reproduction directory
    root_spec_yaml_path = os.path.join(repro_dir, 'root.yaml')
    with open(root_spec_yaml_path, 'w') as fd:
        fd.write(spec_map['root'].to_yaml(hash=ht.build_hash))

    # Write some other details to aid in reproduction into an artifact
    repro_file = os.path.join(repro_dir, 'repro.json')
    repro_details = {
        'job_name': ci_job_name,
        'job_spec_yaml': job_spec_yaml_file,
        'root_spec_yaml': 'root.yaml',
        'ci_project_dir': ci_project_dir
    }
    with open(repro_file, 'w') as fd:
        fd.write(json.dumps(repro_details))

    # Write information about spack into an artifact in the repro dir
    spack_info = spack_ci.get_spack_info()
    spack_info_file = os.path.join(repro_dir, 'spack_info.txt')
    with open(spack_info_file, 'w') as fd:
        fd.write('\n{0}\n'.format(spack_info))

    # If we decided there should be a temporary storage mechanism, add that
    # mirror now so it's used when we check for a full hash match already
    # built for this spec.
    if pipeline_mirror_url:
        spack.mirror.add(spack_ci.TEMP_STORAGE_MIRROR_NAME,
                         pipeline_mirror_url,
                         cfg.default_modify_scope())

    cdash_build_id = None
    cdash_build_stamp = None

    # Check configured mirrors for a built spec with a matching full hash
    matches = bindist.get_mirrors_for_spec(
        job_spec, full_hash_match=True, index_only=False)

    if matches:
        # Got a full hash match on at least one configured mirror.  All
        # matches represent the fully up-to-date spec, so should all be
        # equivalent.  If artifacts mirror is enabled, we just pick one
        # of the matches and download the buildcache files from there to
        # the artifacts, so they're available to be used by dependent
        # jobs in subsequent stages.
        tty.msg('No need to rebuild {0}, found full hash match at: '.format(
            job_spec_pkg_name))
        for match in matches:
            tty.msg('    {0}'.format(match['mirror_url']))

        if enable_artifacts_mirror:
            matching_mirror = matches[0]['mirror_url']
            build_cache_dir = os.path.join(local_mirror_dir, 'build_cache')
            tty.debug('Getting {0} buildcache from {1}'.format(
                job_spec_pkg_name, matching_mirror))
            tty.debug('Downloading to {0}'.format(build_cache_dir))
            buildcache.download_buildcache_files(
                job_spec, build_cache_dir, False, matching_mirror)

        # Now we are done and successful
        sys.exit(0)

    # No full hash match anywhere means we need to rebuild spec

    # Start with spack arguments
    install_args = [base_arg for base_arg in CI_REBUILD_INSTALL_BASE_ARGS]

    config = cfg.get('config')
    if not config['verify_ssl']:
        install_args.append('-k')

    install_args.extend([
        'install',
        '--keep-stage',
        '--require-full-hash-match',
    ])

    can_verify = spack_ci.can_verify_binaries()
    verify_binaries = can_verify and spack_is_pr_pipeline is False
    if not verify_binaries:
        install_args.append('--no-check-signature')

    # If CDash reporting is enabled, we first register this build with
    # the specified CDash instance, then relate the build to those of
    # its dependencies.
    if enable_cdash:
        tty.debug('CDash: Registering build')
        (cdash_build_id,
         cdash_build_stamp) = spack_ci.register_cdash_build(
            cdash_build_name, cdash_base_url, cdash_project,
            cdash_site, job_spec_buildgroup)

        if cdash_build_id is not None:
            cdash_upload_url = '{0}/submit.php?project={1}'.format(
                cdash_base_url, cdash_project_enc)

            install_args.extend([
                '--cdash-upload-url', cdash_upload_url,
                '--cdash-build', cdash_build_name,
                '--cdash-site', cdash_site,
                '--cdash-buildstamp', cdash_build_stamp,
            ])

            tty.debug('CDash: Relating build with dependency builds')
            spack_ci.relate_cdash_builds(
                spec_map, cdash_base_url, cdash_build_id, cdash_project,
                [pipeline_mirror_url, pr_mirror_url, remote_mirror_url])

    # A compiler action of 'FIND_ANY' means we are building a bootstrap
    # compiler or one of its deps.
    # TODO: when compilers are dependencies, we should include --no-add
    if compiler_action != 'FIND_ANY':
        install_args.append('--no-add')

    # TODO: once we have the concrete spec registry, use the DAG hash
    # to identify the spec to install, rather than the concrete spec
    # yaml file.
    install_args.extend(['-f', job_spec_yaml_path])

    tty.debug('Installing {0} from source'.format(job_spec.name))
    tty.debug('spack install arguments: {0}'.format(install_args))

    # Write the install command to a shell script
    with open('install.sh', 'w') as fd:
        fd.write('#!/bin/bash\n\n')
        fd.write('\n# spack install command\n')
        fd.write(' '.join(['"{0}"'.format(i) for i in install_args]))
        fd.write('\n')

    st = os.stat('install.sh')
    os.chmod('install.sh', st.st_mode | stat.S_IEXEC)

    install_copy_path = os.path.join(repro_dir, 'install.sh')
    shutil.copyfile('install.sh', install_copy_path)

    # Run the generated install.sh shell script as if it were being run in
    # a login shell.  Default the exit code to the failure value so it is
    # defined even if launching the script itself raises.
    install_exit_code = INSTALL_FAIL_CODE
    try:
        install_process = subprocess.Popen(['bash', '-l', './install.sh'])
        install_process.wait()
        install_exit_code = install_process.returncode
    except (ValueError, subprocess.CalledProcessError, OSError) as inst:
        tty.error('Encountered error running install script')
        tty.error(inst)

    # Now do the post-install tasks
    tty.debug('spack install exited {0}'.format(install_exit_code))

    # If a spec fails to build in a spack develop pipeline, we add it to a
    # list of known broken full hashes.  This allows spack PR pipelines to
    # avoid wasting compute cycles attempting to build those hashes.
    if install_exit_code == INSTALL_FAIL_CODE and spack_is_develop_pipeline:
        tty.debug('Install failed on develop')
        if 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            dev_fail_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, dev_fail_hash)
            tty.msg('Reporting broken develop build as: {0}'.format(
                broken_spec_path))
            tmpdir = tempfile.mkdtemp()
            empty_file_path = os.path.join(tmpdir, 'empty.txt')

            broken_spec_details = {
                'broken-spec': {
                    'job-url': get_env_var('CI_JOB_URL'),
                    'pipeline-url': get_env_var('CI_PIPELINE_URL'),
                    'concrete-spec-yaml': job_spec.to_dict(hash=ht.full_hash)
                }
            }

            try:
                with open(empty_file_path, 'w') as efd:
                    efd.write(syaml.dump(broken_spec_details))
                web_util.push_to_url(
                    empty_file_path,
                    broken_spec_path,
                    keep_original=False,
                    extra_args={'ContentType': 'text/plain'})
            except Exception as err:
                # If we got some kind of S3 (access denied or other connection
                # error), the first non boto-specific class in the exception
                # hierarchy is Exception.  Just print a warning and return
                msg = 'Error writing to broken specs list {0}: {1}'.format(
                    broken_spec_path, err)
                tty.warn(msg)
            finally:
                shutil.rmtree(tmpdir)

    # We generated the "spack install ..." command to "--keep-stage", copy
    # any logs from the staging directory to artifacts now
    spack_ci.copy_stage_logs_to_artifacts(job_spec, job_log_dir)

    # Create buildcache on remote mirror, either on pr-specific mirror or
    # on the main mirror defined in the gitlab-enabled spack environment
    if spack_is_pr_pipeline:
        buildcache_mirror_url = pr_mirror_url
    else:
        buildcache_mirror_url = remote_mirror_url

    # If the install succeeded, create a buildcache entry for this job spec
    # and push it to one or more mirrors.  If the install did not succeed,
    # print out some instructions on how to reproduce this build failure
    # outside of the pipeline environment.
    if install_exit_code == 0:
        can_sign = spack_ci.can_sign_binaries()
        sign_binaries = can_sign and spack_is_pr_pipeline is False

        # Create buildcache in either the main remote mirror, or in the
        # per-PR mirror, if this is a PR pipeline
        if buildcache_mirror_url:
            spack_ci.push_mirror_contents(
                env, job_spec, job_spec_yaml_path, buildcache_mirror_url,
                sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, buildcache_mirror_url))
                spack_ci.write_cdashid_to_mirror(
                    cdash_build_id, job_spec, buildcache_mirror_url)

        # Create another copy of that buildcache in the per-pipeline
        # temporary storage mirror (this is only done if either
        # artifacts buildcache is enabled or a temporary storage url
        # prefix is set)
        if pipeline_mirror_url:
            spack_ci.push_mirror_contents(
                env, job_spec, job_spec_yaml_path, pipeline_mirror_url,
                sign_binaries)

            if cdash_build_id:
                tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
                    cdash_build_id, pipeline_mirror_url))
                spack_ci.write_cdashid_to_mirror(
                    cdash_build_id, job_spec, pipeline_mirror_url)

        # If this is a develop pipeline, check if the spec that we just built
        # is on the broken-specs list.  If so, remove it.
        if spack_is_develop_pipeline and 'broken-specs-url' in gitlab_ci:
            broken_specs_url = gitlab_ci['broken-specs-url']
            just_built_hash = job_spec.full_hash()
            broken_spec_path = url_util.join(broken_specs_url, just_built_hash)
            if web_util.url_exists(broken_spec_path):
                tty.msg('Removing {0} from the list of broken specs'.format(
                    broken_spec_path))
                try:
                    web_util.remove_url(broken_spec_path)
                except Exception as err:
                    # If we got some kind of S3 (access denied or other
                    # connection error), the first non boto-specific class in
                    # the exception hierarchy is Exception.  Just print a
                    # warning and return
                    msg = 'Error removing {0} from broken specs list: {1}'.format(
                        broken_spec_path, err)
                    tty.warn(msg)

    else:
        tty.debug('spack install exited non-zero, will not create buildcache')

        api_root_url = get_env_var('CI_API_V4_URL')
        ci_project_id = get_env_var('CI_PROJECT_ID')
        ci_job_id = get_env_var('CI_JOB_ID')

        repro_job_url = '{0}/projects/{1}/jobs/{2}/artifacts'.format(
            api_root_url, ci_project_id, ci_job_id)

        # Control characters cause this to be printed in blue so it stands out
        reproduce_msg = """

\033[34mTo reproduce this build locally, run:

    spack ci reproduce-build {0} [--working-dir <dir>]

If this project does not have public pipelines, you will need to first:

    export GITLAB_PRIVATE_TOKEN=<generated_token>

... then follow the printed instructions.\033[0;0m

""".format(repro_job_url)

        print(reproduce_msg)

    # Tie job success/failure to the success/failure of building the spec
    return install_exit_code

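# The get_env_var helper used throughout ci_rebuild is not shown in this
# excerpt.  A minimal sketch, assuming it simply reads the process environment
# and returns None for unset variables (the exact behavior is an assumption):
def get_env_var(variable_name):
    if variable_name in os.environ:
        return os.environ.get(variable_name)
    return None
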
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
                  allow_root=False, key=None, regenerate_index=False):
    """
    Build a tarball from given spec and put it into the directory structure
    used at the mirror (following <tarball_directory_name>).
    """
    if not spec.concrete:
        raise ValueError('spec must be concrete to build tarball')

    # set up some paths
    tmpdir = tempfile.mkdtemp()
    cache_prefix = build_cache_prefix(tmpdir)

    tarfile_name = tarball_name(spec, '.tar.gz')
    tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
    tarfile_path = os.path.join(tarfile_dir, tarfile_name)
    spackfile_path = os.path.join(
        cache_prefix, tarball_path_name(spec, '.spack'))

    remote_spackfile_path = url_util.join(
        outdir, os.path.relpath(spackfile_path, tmpdir))

    mkdirp(tarfile_dir)
    if web_util.url_exists(remote_spackfile_path):
        if force:
            web_util.remove_url(remote_spackfile_path)
        else:
            raise NoOverwriteException(url_util.format(remote_spackfile_path))

    # need to copy the spec file so the build cache can be downloaded
    # without concretizing with the current spack packages
    # and preferences
    spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
    specfile_name = tarball_name(spec, '.spec.yaml')
    specfile_path = os.path.realpath(
        os.path.join(cache_prefix, specfile_name))

    remote_specfile_path = url_util.join(
        outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))

    if web_util.url_exists(remote_specfile_path):
        if force:
            web_util.remove_url(remote_specfile_path)
        else:
            raise NoOverwriteException(url_util.format(remote_specfile_path))

    # make a copy of the install directory to work with
    workdir = os.path.join(tempfile.mkdtemp(), os.path.basename(spec.prefix))
    install_tree(spec.prefix, workdir, symlinks=True)

    # create info for later relocation and create tar
    write_buildinfo_file(spec.prefix, workdir, rel=rel)

    # optionally make the paths in the binaries relative to each other
    # in the spack install tree before creating tarball
    if rel:
        try:
            make_package_relative(workdir, spec, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            shutil.rmtree(tmpdir)
            tty.die(e)
    else:
        try:
            make_package_placeholder(workdir, spec, allow_root)
        except Exception as e:
            shutil.rmtree(workdir)
            shutil.rmtree(tarfile_dir)
            shutil.rmtree(tmpdir)
            tty.die(e)

    # create compressed tarball of the install prefix
    with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
        tar.add(name='%s' % workdir,
                arcname='%s' % os.path.basename(spec.prefix))
    # remove copy of install directory
    shutil.rmtree(workdir)

    # get the sha256 checksum of the tarball
    checksum = checksum_tarball(tarfile_path)

    # add sha256 checksum to spec.yaml
    with open(spec_file, 'r') as inputfile:
        content = inputfile.read()
        spec_dict = yaml.load(content)
    bchecksum = {}
    bchecksum['hash_algorithm'] = 'sha256'
    bchecksum['hash'] = checksum
    spec_dict['binary_cache_checksum'] = bchecksum
    # Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
    buildinfo = {}
    buildinfo['relative_prefix'] = os.path.relpath(
        spec.prefix, spack.store.layout.root)
    spec_dict['buildinfo'] = buildinfo
    spec_dict['full_hash'] = spec.full_hash()

    tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(
        spec_dict['full_hash'],
        spec.name,
        url_util.format(remote_specfile_path)))
    tty.debug(spec.tree())

    with open(specfile_path, 'w') as outfile:
        outfile.write(syaml.dump(spec_dict))

    # sign the tarball and spec file with gpg
    if not unsigned:
        sign_tarball(key, force, specfile_path)

    # put tarball, spec and signature files in .spack archive
    with closing(tarfile.open(spackfile_path, 'w')) as tar:
        tar.add(name='%s' % tarfile_path, arcname='%s' % tarfile_name)
        tar.add(name='%s' % specfile_path, arcname='%s' % specfile_name)
        if not unsigned:
            tar.add(name='%s.asc' % specfile_path,
                    arcname='%s.asc' % specfile_name)

    # cleanup file moved to archive
    os.remove(tarfile_path)
    if not unsigned:
        os.remove('%s.asc' % specfile_path)

    web_util.push_to_url(
        spackfile_path, remote_spackfile_path, keep_original=False)
    web_util.push_to_url(
        specfile_path, remote_specfile_path, keep_original=False)

    try:
        # create an index.html for the build_cache directory so specs can be
        # found
        if regenerate_index:
            generate_package_index(
                url_util.join(outdir, os.path.relpath(cache_prefix, tmpdir)))
    finally:
        shutil.rmtree(tmpdir)

    return None

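# Hypothetical usage sketch (the package name and mirror URL are made up):
# the spec must be concrete and already installed, since build_tarball tars
# up the install prefix; regenerate_index=True refreshes the mirror index
# after the new entry is pushed.
def _example_build_tarball():
    spec = Spec('zlib').concretized()
    build_tarball(spec, 'file:///tmp/my-mirror',
                  force=True, unsigned=True, regenerate_index=True)
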
def buildcache_sync(args):
    """ Syncs binaries (and associated metadata) from one mirror to another.
        Requires an active environment in order to know which specs to sync.

        Args:
            src (str): Source mirror URL
            dest (str): Destination mirror URL
    """
    # Figure out the source mirror
    source_location = None
    if args.src_directory:
        source_location = args.src_directory
        scheme = url_util.parse(source_location, scheme='<missing>').scheme
        if scheme != '<missing>':
            raise ValueError(
                '"--src-directory" expected a local path; got a URL, instead')
        # Ensure that the mirror lookup does not mistake this for named mirror
        source_location = 'file://' + source_location
    elif args.src_mirror_name:
        source_location = args.src_mirror_name
        result = spack.mirror.MirrorCollection().lookup(source_location)
        if result.name == "<unnamed>":
            raise ValueError(
                'no configured mirror named "{name}"'.format(
                    name=source_location))
    elif args.src_mirror_url:
        source_location = args.src_mirror_url
        scheme = url_util.parse(source_location, scheme='<missing>').scheme
        if scheme == '<missing>':
            raise ValueError(
                '"{url}" is not a valid URL'.format(url=source_location))

    src_mirror = spack.mirror.MirrorCollection().lookup(source_location)
    src_mirror_url = url_util.format(src_mirror.fetch_url)

    # Figure out the destination mirror
    dest_location = None
    if args.dest_directory:
        dest_location = args.dest_directory
        scheme = url_util.parse(dest_location, scheme='<missing>').scheme
        if scheme != '<missing>':
            raise ValueError(
                '"--dest-directory" expected a local path; got a URL, instead')
        # Ensure that the mirror lookup does not mistake this for named mirror
        dest_location = 'file://' + dest_location
    elif args.dest_mirror_name:
        dest_location = args.dest_mirror_name
        result = spack.mirror.MirrorCollection().lookup(dest_location)
        if result.name == "<unnamed>":
            raise ValueError(
                'no configured mirror named "{name}"'.format(
                    name=dest_location))
    elif args.dest_mirror_url:
        dest_location = args.dest_mirror_url
        scheme = url_util.parse(dest_location, scheme='<missing>').scheme
        if scheme == '<missing>':
            raise ValueError(
                '"{url}" is not a valid URL'.format(url=dest_location))

    dest_mirror = spack.mirror.MirrorCollection().lookup(dest_location)
    dest_mirror_url = url_util.format(dest_mirror.fetch_url)

    # Get the active environment
    env = spack.cmd.require_active_env(cmd_name='buildcache sync')

    tty.msg('Syncing environment buildcache files from {0} to {1}'.format(
        src_mirror_url, dest_mirror_url))

    build_cache_dir = bindist.build_cache_relative_path()
    buildcache_rel_paths = []

    tty.debug('Syncing the following specs:')
    for s in env.all_specs():
        tty.debug('  {0}{1}: {2}'.format(
            '* ' if s in env.roots() else '  ', s.name, s.dag_hash()))

        buildcache_rel_paths.extend([
            os.path.join(
                build_cache_dir, bindist.tarball_path_name(s, '.spack')),
            os.path.join(
                build_cache_dir, bindist.tarball_name(s, '.spec.yaml')),
            os.path.join(
                build_cache_dir, bindist.tarball_name(s, '.cdashid'))
        ])

    tmpdir = tempfile.mkdtemp()

    try:
        for rel_path in buildcache_rel_paths:
            src_url = url_util.join(src_mirror_url, rel_path)
            local_path = os.path.join(tmpdir, rel_path)
            dest_url = url_util.join(dest_mirror_url, rel_path)

            tty.debug('Copying {0} to {1} via {2}'.format(
                src_url, dest_url, local_path))

            stage = Stage(src_url,
                          name="temporary_file",
                          path=os.path.dirname(local_path),
                          keep=True)

            try:
                stage.create()
                stage.fetch()
                web_util.push_to_url(
                    local_path,
                    dest_url,
                    keep_original=True)
            except fs.FetchError as e:
                tty.debug('spack buildcache unable to sync {0}'.format(
                    rel_path))
                tty.debug(e)
            finally:
                stage.destroy()
    finally:
        shutil.rmtree(tmpdir)

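# Hypothetical command-line invocations handled by buildcache_sync (mirror
# paths and URLs are made up).  The --src-directory/--dest-directory flags
# appear in the error messages above; the spelling of the name/url variants
# is assumed from the argparse attributes referenced in the function:
#
#     spack buildcache sync --src-directory /mirrors/source \
#                           --dest-directory /mirrors/destination
#
#     spack buildcache sync --src-mirror-url s3://source-mirror \
#                           --dest-mirror-url s3://dest-mirror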