def _create_test_files(tmpdir):
    """Populate *tmpdir* with builds.json and meta.json test fixtures.

    Writes a minimal builds index under ``tmpdir/builds`` and a dummy
    meta.json under the build/arch directory, then returns *tmpdir* so
    callers can chain on the populated directory.
    """
    arch = get_basearch()
    builds_index = {
        "schema-version": "1.0.0",
        "builds": [{"id": "1.2.3", "arches": [arch]}],
        "timestamp": "2019-01-1T15:19:45Z"
    }
    meta_content = {
        'test': 'data',
        'a': {'b': 'c'},
    }
    builds_path = os.path.join(tmpdir, 'builds')
    os.makedirs(builds_path, exist_ok=True)
    with open(os.path.join(builds_path, 'builds.json'), 'w') as fp:
        json.dump(builds_index, fp)
    meta_path = os.path.join(tmpdir, 'builds', '1.2.3', arch)
    os.makedirs(meta_path, exist_ok=True)
    with open(os.path.join(meta_path, 'meta.json'), 'w') as fp:
        json.dump(meta_content, fp)
    return tmpdir
def get_build_dir(self, build_id, basearch=None):
    """Return the on-disk directory for *build_id* on *basearch*.

    The special id 'latest' resolves to the most recent build; when
    *basearch* is not given, the current machine architecture is used.
    """
    resolved_id = self.get_latest() if build_id == 'latest' else build_id
    arch = basearch if basearch else get_basearch()
    return self._path(f"builds/{resolved_id}/{arch}")
def parse_args():
    """Build the CLI parser and return the parsed arguments.

    All options except --stg are required; --arch defaults to the
    current machine architecture.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--build", required=True, help="Build ID")
    ap.add_argument("--arch", default=get_basearch(),
                    help="target architecture")
    ap.add_argument("--fedmsg-conf", metavar="CONFIG.TOML", required=True,
                    help="fedora-messaging config file for publishing")
    ap.add_argument("--stg", action="store_true",
                    help="target the stg infra rather than prod")
    ap.add_argument("--s3", metavar="<BUCKET>[/PREFIX]", required=True,
                    help="bucket and prefix to S3 builds/ dir")
    ap.add_argument("--repo", choices=["prod", "compose"], required=True,
                    help="the name of the OSTree repo within Fedora to import into")
    return ap.parse_args()
def insert_build(self, build_id, basearch=None):
    """Record *build_id* for *basearch* in the in-memory builds list.

    New builds are inserted at the front (most-recent-first); if the
    build id already exists, *basearch* is appended to its arch list.

    Raises:
        Exception: if *build_id* is already recorded for *basearch*.
    """
    if not basearch:
        basearch = get_basearch()
    # for future tooling: allow inserting in an existing build for a
    # separate arch
    for build in self._data['builds']:
        if build['id'] == build_id:
            if basearch in build['arches']:
                # Fix: the original did `raise "..."`, which is a
                # TypeError in Python 3 (exceptions must derive from
                # BaseException), and the message was missing its
                # f-prefix so it was never interpolated.
                raise Exception(f"Build {build_id} for {basearch} already exists")
            build['arches'] += [basearch]
            break
    else:
        self._data['builds'].insert(0, {
            'id': build_id,
            'arches': [basearch]
        })
def generate_ovf_parameters(self, raw):
    """
    Returns a dictionary with the parameters needed to create an OVF
    and meta file based on the qemu, raw, and info from the build
    metadata
    """
    name = self.meta["name"]
    version = self.meta["ostree-version"]
    return {
        'os': os.path.basename(raw).split("-")[0],
        'basearch': get_basearch(),
        'image_description': f'{name} {self.meta["summary"]} {version}',
        'image': f'{name}-{version}',
        'image_size': str(os.stat(raw).st_size),
    }
Provides a base abstraction class for build reuse.
"""

import logging as log
import os
import os.path
import shutil
import tempfile

from cosalib.cmdlib import (get_basearch, load_json, sha256sum_file)
from cosalib.builds import Builds
from cosalib.meta import GenericBuildMeta as Meta
from cosalib.meta import SCHEMA_PATH

# BASEARCH is the current machine architecture
BASEARCH = get_basearch()


class BuildError(Exception):
    """
    Base error for build issues; all build-related errors in this
    module should derive from it.
    """
    pass


class BuildExistsError(BuildError):
    """
    Thrown when a build already exists
    """
    pass
def oscontainer_build(containers_storage, tmpdir, src, ref, image_name_and_tag,
                      base_image, push=False, tls_verify=True,
                      add_directories=[], cert_dir="", authfile="",
                      digestfile=None, display_name=None, labeled_pkgs=[]):
    """Build (and optionally push) an oscontainer from an OSTree commit.

    Pulls the commit at *ref* from the OSTree repo at *src* into a fresh
    archive-mode repo inside a container derived from *base_image*,
    attaches commit/version/package labels, commits the result as
    *image_name_and_tag*, and pushes it when *push* is set.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (run_verbose, run_get_string, run_get_json, NESTED_BUILD_ARGS,
    OSCONTAINER_COMMIT_LABEL, cmdlib) and on GI bindings (OSTree, Gio,
    GLib, RpmOstree).
    NOTE(review): add_directories/labeled_pkgs are mutable default
    arguments; harmless here since they are only read, but worth
    switching to None-defaults.
    """
    r = OSTree.Repo.new(Gio.File.new_for_path(src))
    r.open(None)

    # Resolve the ref to a concrete commit checksum.
    [_, rev] = r.resolve_rev(ref, True)
    if ref != rev:
        print("Resolved {} = {}".format(ref, rev))
    # Extract the 'version' string from the commit metadata, if present.
    [_, ostree_commit, _] = r.load_commit(rev)
    ostree_commitmeta = ostree_commit.get_child_value(0)
    versionv = ostree_commitmeta.lookup_value(
        "version", GLib.VariantType.new("s"))
    if versionv:
        ostree_version = versionv.get_string()
    else:
        ostree_version = None

    # Base argv for podman/buildah, optionally redirected to an
    # alternate storage root and augmented for nested-container use.
    podman_base_argv = ['podman']
    buildah_base_argv = ['buildah']
    if containers_storage is not None:
        podman_base_argv.append(f"--root={containers_storage}")
        buildah_base_argv.append(f"--root={containers_storage}")
        if os.environ.get('container') is not None:
            print("Using nested container mode due to container environment variable")
            podman_base_argv.extend(NESTED_BUILD_ARGS)
            buildah_base_argv.extend(NESTED_BUILD_ARGS)
        else:
            print("Skipping nested container mode")

    # In general, we just stick with the default tmpdir set up. But if a
    # workdir is provided, then we want to be sure that all the heavy I/O work
    # that happens stays in there since e.g. we might be inside a tiny supermin
    # appliance.
    if tmpdir is not None:
        os.environ['TMPDIR'] = tmpdir

    # Create a working container from the base image and mount its
    # filesystem; everything below writes into that mount.
    bid = run_get_string(buildah_base_argv + ['from', base_image])
    mnt = run_get_string(buildah_base_argv + ['mount', bid])
    try:
        # Fresh archive-mode repo inside the container at /srv/repo.
        dest_repo = os.path.join(mnt, 'srv/repo')
        subprocess.check_call(['mkdir', '-p', dest_repo])
        subprocess.check_call([
            "ostree", "--repo=" + dest_repo, "init", "--mode=archive"])
        # Note that oscontainers don't have refs; we also disable fsync
        # because the repo will be put into a container image and the build
        # process should handle its own fsync (or choose not to).
        print("Copying ostree commit into container: {} ...".format(rev))
        run_verbose(["ostree", "--repo=" + dest_repo, "pull-local",
                     "--disable-fsync", src, rev])

        # Reflink-copy the contents of each extra directory into the
        # container root.
        for d in add_directories:
            with os.scandir(d) as it:
                for entry in it:
                    dest = os.path.join(mnt, entry.name)
                    subprocess.check_call(['/usr/lib/coreos-assembler/cp-reflink',
                                           entry.path, dest])
            print(f"Copied in content from: {d}")

        # We use /noentry to trick `podman create` into not erroring out
        # on a container with no cmd/entrypoint. It won't actually be run.
        config = ['--entrypoint', '["/noentry"]',
                  '-l', OSCONTAINER_COMMIT_LABEL + '=' + rev]
        if ostree_version is not None:
            config += ['-l', 'version=' + ostree_version]

        # Label the image with evr.arch for each package the caller asked
        # to be tracked.
        base_pkgs = RpmOstree.db_query_all(r, rev, None)
        for pkg in base_pkgs:
            name = pkg.get_name()
            if name in labeled_pkgs:
                config += ['-l', f"com.coreos.rpm.{name}={pkg.get_evr()}.{pkg.get_arch()}"]

        # Generate pkglist.txt in to the oscontainer at /
        pkg_list_dest = os.path.join(mnt, 'pkglist.txt')
        # should already be sorted, but just re-sort to be sure
        nevras = sorted([pkg.get_nevra() for pkg in base_pkgs])
        with open(pkg_list_dest, 'w') as f:
            for nevra in nevras:
                f.write(nevra)
                f.write('\n')

        # If we are running inside a cosa workdir, pull build metadata
        # from the latest build and turn it into image labels.
        meta = {}
        builddir = None
        if os.path.isfile('builds/builds.json'):
            with open('builds/builds.json') as fb:
                builds = json.load(fb)['builds']
            latest_build = builds[0]['id']
            arch = cmdlib.get_basearch()
            builddir = f"builds/{latest_build}/{arch}"
            metapath = f"{builddir}/meta.json"
            with open(metapath) as f:
                meta = json.load(f)
            rhcos_commit = meta['coreos-assembler.container-config-git']['commit']
            imagegit = meta.get('coreos-assembler.container-image-git')
            if imagegit is not None:
                cosa_commit = imagegit['commit']
                config += ['-l', f"com.coreos.coreos-assembler-commit={cosa_commit}"]
            config += ['-l', f"com.coreos.redhat-coreos-commit={rhcos_commit}"]

        # Unpack the extensions tarball into the container and label the
        # image with the list of os-extensions plus per-package versions.
        if 'extensions' in meta:
            tarball = os.path.abspath(os.path.join(builddir, meta['extensions']['path']))
            dest_dir = os.path.join(mnt, 'extensions')
            os.makedirs(dest_dir, exist_ok=True)
            run_verbose(["tar", "-xf", tarball], cwd=dest_dir)

            with open(os.path.join(dest_dir, 'extensions.json')) as f:
                extensions = json.load(f)

            extensions_label = ';'.join([ext for (ext, obj) in extensions['extensions'].items()
                                         if obj.get('kind', 'os-extension') == 'os-extension'])
            config += ['-l', f"com.coreos.os-extensions={extensions_label}"]

            for pkgname in meta['extensions']['manifest']:
                if pkgname in labeled_pkgs:
                    evra = meta['extensions']['manifest'][pkgname]
                    config += ['-l', f"com.coreos.rpm.{pkgname}={evra}"]

        if display_name is not None:
            config += ['-l', 'io.openshift.build.version-display-names=machine-os=' + display_name,
                       '-l', 'io.openshift.build.versions=machine-os=' + ostree_version]
        # Apply all accumulated labels/config and commit the container.
        run_verbose(buildah_base_argv + ['config'] + config + [bid])
        print("Committing container...")
        iid = run_get_string(buildah_base_argv + ['commit', bid, image_name_and_tag])
        print("{} {}".format(image_name_and_tag, iid))
    finally:
        # Always unmount and remove the working container, even on error.
        subprocess.call(buildah_base_argv + ['umount', bid], stdout=subprocess.DEVNULL)
        subprocess.call(buildah_base_argv + ['rm', bid], stdout=subprocess.DEVNULL)

    if push:
        print("Pushing container")
        podCmd = podman_base_argv + ['push']
        if not tls_verify:
            tls_arg = '--tls-verify=false'
        else:
            tls_arg = '--tls-verify'
        podCmd.append(tls_arg)

        if authfile != "":
            podCmd.append("--authfile={}".format(authfile))

        if cert_dir != "":
            podCmd.append("--cert-dir={}".format(cert_dir))

        podCmd.append(image_name_and_tag)

        if digestfile is not None:
            podCmd.append(f'--digestfile={digestfile}')

        run_verbose(podCmd)
    elif digestfile is not None:
        # Not pushing: record the local image digest instead.
        inspect = run_get_json(podman_base_argv + ['inspect', image_name_and_tag])[0]
        with open(digestfile, 'w') as f:
            f.write(inspect['Digest'])