Example No. 1
    def _latest_mosc_source(self, arch, private):
        stream_name = f"{arch}{'-priv' if private else ''}"
        self.runtime.logger.info(f"Getting latest RHCOS source for {stream_name}...")
        try:
            version = self.runtime.get_minor_version()
            build_id, pullspec = rhcos.latest_machine_os_content(version, arch, private)
            if not pullspec:
                raise Exception(f"No RHCOS found for {version}")

            commitmeta = rhcos.rhcos_build_meta(build_id, version, arch, private, meta_type="commitmeta")
            rpm_list = commitmeta.get("rpmostree.rpmdb.pkglist")
            if not rpm_list:
                raise Exception(f"no pkglist in {commitmeta}")

        except Exception as ex:
            problem = f"{stream_name}: {ex}"
            red_print(f"error finding RHCOS {problem}")
            # record when there is a problem; as each arch is a separate build, make an array
            self.state.setdefault("images", {}).setdefault("machine-os-content", []).append(problem)
            return None

        # create fake brew image archive to be analyzed later for rpm inconsistencies
        archive = dict(
            build_id=f"({arch}){build_id}",
            rpms=[dict(name=r[0], epoch=r[1], nvr=f"{r[0]}-{r[2]}-{r[3]}") for r in rpm_list],
            # nothing else should be needed - if we need more, will have to fake it here
        )

        return dict(
            archive=archive,
            image_src=pullspec,
            # nothing else should be needed - if we need more, will have to fake it here
        )
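The fake archive above hinges on the layout of `rpmostree.rpmdb.pkglist` rows; the indexing (`r[0]`, `r[1]`, `r[2]`, `r[3]`) implies each row is [name, epoch, version, release, arch]. A minimal self-contained sketch of that NVR construction, with invented sample data:

# Sketch only: assumes pkglist rows are [name, epoch, version, release, arch],
# as implied by the indexing in _latest_mosc_source. Sample data is invented.
rpm_list = [
    ["openssh", "0", "8.0p1", "5.el8", "x86_64"],
    ["kernel", "0", "4.18.0", "305.el8", "x86_64"],
]
rpms = [dict(name=r[0], epoch=r[1], nvr=f"{r[0]}-{r[2]}-{r[3]}") for r in rpm_list]
print(rpms[0]["nvr"])  # openssh-8.0p1-5.el8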
Example No. 2
    def _find_mismatched_siblings(self, builds):
        """ Sibling images are those built from the same repository. We need to throw an error if there are sibling built from different commit.
        """
        # First, loop over all builds and store their source repos and commits to a dict
        repo_commit_nvrs = {}  # key is source repo url; value is another dict whose keys are commit hashes and values are sets of NVRs
        # Second, build a dict whose keys are NVRs and values are the ImageMetadata objects. ImageMetadatas are used for logging state.
        nvr_images = {}

        for record in builds:
            # source repo url and commit hash are stored in image's environment variables.
            ar = record.archives[0]  # the build is a manifest list, let's look at the first architecture
            envs = ar["extra"]["docker"]["config"]["config"].get("Env", [])
            source_repo_entry = list(filter(lambda env: env.startswith("SOURCE_GIT_URL="), envs))
            source_commit_entry = list(filter(lambda env: env.startswith("SOURCE_GIT_COMMIT="), envs))
            if not source_repo_entry or not source_commit_entry:
                continue  # this image doesn't have required environment variables. is it a dist-git only image?
            source_repo = source_repo_entry[0][source_repo_entry[0].find("=") + 1:]  # SOURCE_GIT_URL=https://example.com => https://example.com
            source_commit = source_commit_entry[0][source_commit_entry[0].find("=") + 1:]  # SOURCE_GIT_COMMIT=abc => abc
            nvrs = repo_commit_nvrs.setdefault(source_repo, {}).setdefault(source_commit, set())
            nvrs.add(record.build["nvr"])
            nvr_images[record.build["nvr"]] = record.image

        # Finally, look at the dict and print an error if one repo has 2 or more commits
        mismatched_siblings = set()
        for repo, commit_nvrs in repo_commit_nvrs.items():
            if len(commit_nvrs) >= 2:
                red_print("The following NVRs are siblings but built from different commits:")
                for commit, nvrs in commit_nvrs.items():
                    for nvr in nvrs:
                        image = nvr_images[nvr]
                        mismatched_siblings.add(image.distgit_key)
                        red_print(f"{nvr}\t{image.distgit_key}\t{repo}\t{commit}")
        return mismatched_siblings
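A toy run of the repo -> commit -> NVRs structure this method builds makes the mismatch condition concrete (the repo URL, commits, and NVRs below are invented):

# Two sibling NVRs under one source repo but different commits trigger the report.
repo_commit_nvrs = {
    "https://example.com/openshift/widget": {
        "abc1234": {"widget-operator-container-v4.8.0-1"},
        "def5678": {"widget-daemon-container-v4.8.0-1"},  # sibling, different commit
    },
}
for repo, commit_nvrs in repo_commit_nvrs.items():
    if len(commit_nvrs) >= 2:
        print(f"siblings built from different commits in {repo}: {sorted(commit_nvrs)}")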
Example No. 3
def update_and_build(nvr, stream, runtime, merge_branch, force_build=False):
    """Module entrypoint, orchestrate update and build steps of metadata repos

    :param string nvr: Operator name-version-release
    :param string stream: Which metadata repo should be updated (dev, stage, prod)
    :param Runtime runtime: a runtime instance
    :param string merge_branch: Which branch should be updated in the metadata repo
    :param bool force_build: Build the metadata container even when the repo is unchanged
    :return: bool True if operations succeeded, False if something went wrong
    """
    op_md = OperatorMetadataBuilder(nvr, stream, runtime=runtime)

    if not op_md.update_metadata_repo(merge_branch) and not force_build:
        logger.info('No changes in metadata repo, skipping build')
        print(
            OperatorMetadataLatestBuildReporter(op_md.operator_name,
                                                runtime).get_latest_build())
        return True

    if not op_md.build_metadata_container():
        util.red_print('Build of {} failed, see debug.log'.format(
            op_md.metadata_repo))
        return False

    print(
        OperatorMetadataLatestBuildReporter(op_md.operator_name,
                                            runtime).get_latest_build())
    return True
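A hypothetical call site for this entrypoint, assuming the surrounding CLI layer already constructed a Runtime; the NVR, stream, and branch values are invented:

import sys

def cli_main(runtime):
    # Hypothetical wrapper around update_and_build; argument values are invented.
    ok = update_and_build(
        nvr="my-operator-metadata-container-v4.2.0-1",
        stream="dev",
        runtime=runtime,
        merge_branch="dev-branch",
        force_build=False,
    )
    if not ok:
        sys.exit(1)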
Example No. 4
    def find_mismatched_siblings(
        build_image_inspectors: Iterable[Optional[BrewBuildImageInspector]]
    ) -> List[Tuple[BrewBuildImageInspector, BrewBuildImageInspector]]:
        """
        Sibling images are those built from the same repository. We need to throw an error
        if there are siblings built from different commits.
        :return: Returns a list of (BrewBuildImageInspector,BrewBuildImageInspector) where the first item is a mismatched sibling of the second
        """
        class RepoBuildRecord(NamedTuple):
            build_image_inspector: BrewBuildImageInspector
            source_git_commit: str

        # Maps SOURCE_GIT_URL -> the RepoBuildRecord for the first build encountered
        # claiming to be sourced from that SOURCE_GIT_URL.
        repo_builds: Dict[str, RepoBuildRecord] = dict()

        mismatched_siblings: List[Tuple[BrewBuildImageInspector, BrewBuildImageInspector]] = []
        for build_image_inspector in build_image_inspectors:

            if not build_image_inspector:
                # No build for this component at present.
                continue

            # Here we check the raw config - before it is affected by assembly overrides. Why?
            # If an artist overrides one sibling's git url, but not another, the following
            # scan would not be able to detect that they were siblings. Instead, we rely on the
            # original image metadata to determine sibling-ness.
            source_url = build_image_inspector.get_image_meta().raw_config.content.source.git.url

            source_git_commit = build_image_inspector.get_source_git_commit()
            if not source_url or not source_git_commit:
                # This is true for distgit only components.
                continue

            # Make sure URLs are comparable regardless of git: or https:
            source_url = convert_remote_git_to_https(source_url)

            potential_conflict: RepoBuildRecord = repo_builds.get(source_url, None)
            if potential_conflict:
                # Another component has been built from this repo before. Make
                # sure it was built from the same commit.
                if potential_conflict.source_git_commit != source_git_commit:
                    mismatched_siblings.append(
                        (build_image_inspector,
                         potential_conflict.build_image_inspector))
                    red_print(
                        f"The following NVRs are siblings but built from different commits: {potential_conflict.build_image_inspector.get_nvr()} and {build_image_inspector.get_nvr()}",
                        file=sys.stderr)
            else:
                # No conflict, so this is our first encounter for this repo; add it to our tracking dict.
                repo_builds[source_url] = RepoBuildRecord(
                    build_image_inspector=build_image_inspector,
                    source_git_commit=source_git_commit)

        return mismatched_siblings
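`convert_remote_git_to_https` is what makes `git@` and `https://` remotes comparable above. A rough stand-in (not the actual helper) could normalize the common spellings like this:

import re

def convert_remote_git_to_https_sketch(url: str) -> str:
    """Rough sketch only, not the real doozer helper: normalize common
    git remote spellings to https so URLs compare equal."""
    url = url.strip().rstrip("/")
    if url.endswith(".git"):
        url = url[:-4]
    m = re.match(r"git@([^:]+):(.+)", url)  # git@host:org/repo
    if m:
        return f"https://{m.group(1)}/{m.group(2)}"
    return re.sub(r"^git://", "https://", url)

print(convert_remote_git_to_https_sketch("git@github.com:openshift/foo.git"))
# https://github.com/openshift/foo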
Example No. 5
    def _get_mirror_sources(self, latest_builds, mismatched_siblings):
        """
        Determine the image sources to mirror to each arch/private-specific imagestream,
        excluding mismatched siblings; also record success/failure in the state.

        :return: map[(arch, private)] -> map[image_name] -> { version: release: image_src: digest: build_record: }
        """
        mirroring = {}
        for record in latest_builds:
            image = record.image
            error = None
            if image.distgit_key in mismatched_siblings:
                error = "Siblings built from different commits"
            else:
                for archive in record.archives:
                    arch = archive["arch"]
                    pullspecs = archive["extra"]["docker"]["repositories"]
                    if not pullspecs or ":" not in pullspecs[-1]:  # in case of no pullspecs or invalid format
                        error = f"Unable to find pullspecs for: {image.image_name_short}"
                        red_print(error)
                        state.record_image_fail(self.state, image, error, self.runtime.logger)
                        continue
                    # The tag that will be used in the imagestreams
                    tag_name = image.image_name_short
                    tag_name = tag_name[4:] if tag_name.startswith("ose-") else tag_name  # it _should_ but... to be safe
                    digest = archive["extra"]['docker']['digests']['application/vnd.docker.distribution.manifest.v2+json']
                    if not digest.startswith("sha256:"):  # It should start with sha256: for now. Let's raise an error if this changes.
                        raise ValueError(f"Received unrecognized digest {digest} for image {pullspecs[-1]}")

                    mirroring_value = dict(
                        version=record.build["version"],
                        release=record.build["release"],
                        image_src=pullspecs[-1],
                        digest=digest,
                        build_record=record,
                        archive=archive,
                    )

                    if record.private:  # exclude embargoed images from the ocp[-arch] imagestreams
                        yellow_print(f"Omitting embargoed image {pullspecs[-1]}")
                    else:
                        self.runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the public mirroring list with imagestream tag {tag_name}...")
                        mirroring.setdefault((arch, False), {})[tag_name] = mirroring_value

                    if self.runtime.group_config.public_upstreams:
                        # when public_upstreams are configured, both embargoed and non-embargoed images should be included in the ocp[-arch]-priv imagestreams
                        self.runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the private mirroring list with imagestream tag {tag_name}...")
                        mirroring.setdefault((arch, True), {})[tag_name] = mirroring_value

            # per build, record in the state whether we can successfully mirror it
            if error:
                red_print(error)
                state.record_image_fail(self.state, image, error, self.runtime.logger)
            else:
                state.record_image_success(self.state, image)

        return mirroring
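Downstream consumers key into this map by (arch, private) and tag name; a sketch of flattening it into SRC=DEST lines for 'oc image mirror', with an invented destination repo and digest-based tags:

# Sketch: flatten the mirroring map into SRC=DEST lines. All values are
# invented; the sha256:... -> sha256-... tag substitution mirrors the
# build_dest_name helper seen later in this collection.
mirroring = {
    ("x86_64", False): {
        "cli": {"image_src": "registry.example/ose-cli@sha256:abc123", "digest": "sha256:abc123"},
    },
}
for (arch, private), tags in mirroring.items():
    for tag_name, entry in tags.items():
        dest_tag = entry["digest"].replace(":", "-")
        print(f'{entry["image_src"]}=quay.io/example-org/example-repo:{dest_tag}')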
Example No. 6
    def _get_payload_and_non_release_images(self, images):
        payload_images = []
        non_release_items = []
        for image in images:
            if image.for_release:
                payload_images.append(image)
                continue
            non_release_items.append(image.image_name_short)
            red_print(f"NOT adding to IS (non_release: true): {image.image_name_short}")

        return payload_images, non_release_items
Example No. 7
    def _get_payload_images(self, images):
        # images is a list of image metadata - pick out payload images
        payload_images = []
        invalid_name_items = []
        for image in images:
            if image.is_payload:
                """
                <Tim Bielawa> note to self: is only for `ose-` prefixed images
                <Clayton Coleman> Yes, Get with the naming system or get out of town
                """
                if image.image_name_short.startswith("ose-"):
                    payload_images.append(image)
                    continue

                invalid_name_items.append(image.image_name_short)
                red_print(f"NOT adding to IS (does not meet name/version conventions): {image.image_name_short}")

        return payload_images, invalid_name_items
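Both of the helpers above are simple partitions over image metadata; exercising the same predicates with invented stub objects shows the split:

from types import SimpleNamespace

# Invented stubs standing in for image metadata objects.
images = [
    SimpleNamespace(is_payload=True, for_release=True, image_name_short="ose-cli"),
    SimpleNamespace(is_payload=True, for_release=True, image_name_short="tests"),
    SimpleNamespace(is_payload=False, for_release=False, image_name_short="build-tools"),
]
release = [i for i in images if i.for_release]
valid_payload = [i for i in release if i.is_payload and i.image_name_short.startswith("ose-")]
print([i.image_name_short for i in valid_payload])  # ['ose-cli']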
Example No. 8
def release_gen_payload(runtime: Runtime, is_name: Optional[str],
                        is_namespace: Optional[str],
                        organization: Optional[str], repository: Optional[str],
                        exclude_arch: Tuple[str, ...], skip_gc_tagging: bool,
                        emergency_ignore_issues: bool):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files are SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files are YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes this command generates mirroring YAML files
after the arch-specific files have been generated. The YAML files
list the names of the generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.

## Validation ##

Additionally we want to check that the following conditions are true for each
imagestream being updated:

* For all architectures built, RHCOS builds must have matching versions of any
  unshipped RPM they include (per-entry os metadata - the set of RPMs may differ
  between arches, but versions should not).
* Any RPMs present in images (including machine-os-content) from unshipped RPM
  builds included in one of our candidate tags must exactly version-match the
  latest RPM builds in those candidate tags (ONLY; we never flag what we don't
  directly ship.)

These checks (and likely more in the future) should run and any failures should
be listed in brief via a "release.openshift.io/inconsistency" annotation on the
relevant image istag (these are publicly visible; ref. https://bit.ly/37cseC1)
and in more detail in state.yaml. The release-controller, per ART-2195, will
read and propagate/expose this annotation in its display of the release image.
    """
    runtime.initialize(mode='both',
                       clone_distgits=False,
                       clone_source=False,
                       prevent_cloning=True)

    if runtime.assembly not in {None, "stream", "test"} and runtime.assembly not in runtime.releases_config.releases:
        raise DoozerFatalError(f"Assembly '{runtime.assembly}' is not explicitly defined.")

    logger = runtime.logger
    brew_session = runtime.build_retrying_koji_client()

    base_imagestream_name: str = is_name if is_name else assembly_imagestream_base_name(runtime)
    base_istream_namespace: str = is_namespace if is_namespace else default_imagestream_namespace_base_name()

    if runtime.assembly and runtime.assembly != 'stream' and 'art-latest' in base_imagestream_name:
        raise ValueError(
            'The art-latest imagestreams should not be used for an assembly other than "stream"'
        )

    logger.info(
        f'Collecting latest information associated with the assembly: {runtime.assembly}'
    )
    assembly_inspector = AssemblyInspector(runtime, brew_session)
    logger.info('Checking for mismatched siblings...')
    mismatched_siblings = PayloadGenerator.find_mismatched_siblings(
        assembly_inspector.get_group_release_images().values())

    # A list of strings that denote inconsistencies across all payloads generated
    assembly_issues: List[AssemblyIssue] = list()

    for mismatched_bbii, sibling_bbi in mismatched_siblings:
        mismatch_issue = AssemblyIssue(
            f'{mismatched_bbii.get_nvr()} was built from a different upstream source commit ({mismatched_bbii.get_source_git_commit()[:7]}) than one of its siblings {sibling_bbi.get_nvr()} from {sibling_bbi.get_source_git_commit()[:7]}',
            component=mismatched_bbii.get_image_meta().distgit_key,
            code=AssemblyIssueCode.MISMATCHED_SIBLINGS)
        assembly_issues.append(mismatch_issue)

    report = dict()
    report['non_release_images'] = [
        image_meta.distgit_key
        for image_meta in runtime.get_non_release_image_metas()
    ]
    report['release_images'] = [
        image_meta.distgit_key
        for image_meta in runtime.get_for_release_image_metas()
    ]
    report['missing_image_builds'] = [
        dgk
        for (dgk, ii) in assembly_inspector.get_group_release_images().items()
        if ii is None
    ]  # A list of metas where the assembly did not find a build

    if runtime.assembly_type is AssemblyTypes.STREAM:
        # Only nightlies have the concept of private and public payloads
        privacy_modes = [False, True]
    else:
        privacy_modes = [False]

    # Structure to record rhcos builds we use so that they can be analyzed for inconsistencies
    targeted_rhcos_builds: Dict[bool, List[RHCOSBuildInspector]] = {
        False: [],
        True: []
    }
    """
    Collect a list of builds we want to tag in order to prevent garbage collection.
    Note: we also use this list to warm up caches, so don't wrap this section
    with `if not skip_gc_tagging`.

    To prevent garbage collection for custom
    assemblies (which won't normally be released via errata tool, triggering
    the traditional garbage collection prevention), we must tag these builds
    explicitly to prevent their GC. It is necessary to prevent GC, because
    we want to be able to build custom releases off of custom releases, and
    so on. If we lose images and builds for custom releases in brew due
    to garbage collection, we will not be able to construct derivative
    release payloads.
    """
    assembly_build_ids: Set[int] = set()  # Builds associated with the group/assembly; will be used to warm up caches

    list_tags_tasks: Dict[Tuple[int, str], Any] = dict()  # Maps (build_id, tag) tuple to multicall task to list tags
    with runtime.pooled_koji_client_session() as pcs:
        with pcs.multicall(strict=True) as m:
            for bbii in assembly_inspector.get_group_release_images().values():
                if bbii:
                    build_id = bbii.get_brew_build_id()
                    assembly_build_ids.add(build_id)  # Collect build ids for cache warm-up
                    hotfix_tag = bbii.get_image_meta().hotfix_brew_tag()
                    list_tags_tasks[(build_id, hotfix_tag)] = m.listTags(build=build_id)

            # RPMs can build for multiple versions of RHEL. For example, a single RPM
            # metadata can target 7 & 8.
            # For each rhel version targeted by our RPMs, build a list of RPMs
            # appropriate for the RHEL version with respect to the group/assembly.
            rhel_version_scanned_for_rpms: Dict[int, bool] = dict()  # Maps rhel version -> whether we have processed that rhel version
            for rpm_meta in runtime.rpm_metas():
                for el_ver in rpm_meta.determine_rhel_targets():
                    if el_ver in rhel_version_scanned_for_rpms:
                        # We've already processed this RHEL version.
                        continue
                    hotfix_tag = runtime.get_default_hotfix_brew_tag(el_target=el_ver)
                    # Otherwise, query the assembly for this rhel version now.
                    for dgk, rpm_build_dict in assembly_inspector.get_group_rpm_build_dicts(el_ver=el_ver).items():
                        if not rpm_build_dict:
                            # RPM not built for this rhel version
                            continue
                        build_id = rpm_build_dict['id']
                        assembly_build_ids.add(build_id)  # For cache warm-up later.
                        list_tags_tasks[(build_id, hotfix_tag)] = m.listTags(build=build_id)
                    # Record that we are done for this rhel version.
                    rhel_version_scanned_for_rpms[el_ver] = True

    # Tasks should now contain tag list information for all builds associated with this assembly.
    # and assembly_build_ids should contain ids for builds that should be cached.

    # We have a list of image and RPM builds associated with this assembly.
    # Tag them unless we have been told not to from the command line.
    if runtime.assembly_type != AssemblyTypes.STREAM and not skip_gc_tagging:
        with runtime.shared_koji_client_session() as koji_api:
            koji_api.gssapi_login()  # Tagging requires authentication
            with koji_api.multicall() as m:
                for tup, list_tag_task in list_tags_tasks.items():
                    build_id = tup[0]
                    desired_tag = tup[1]
                    current_tags = [
                        tag_entry['name'] for tag_entry in list_tag_task.result
                    ]
                    if desired_tag not in current_tags:
                        # The hotfix tag is missing, so apply it.
                        runtime.logger.info(
                            f'Adding tag {desired_tag} to build: {build_id} to prevent garbage collection.'
                        )
                        m.tagBuild(desired_tag, build_id)

    with runtime.shared_build_status_detector() as bsd:
        bsd.populate_archive_lists(assembly_build_ids)
        bsd.find_shipped_builds(assembly_build_ids)
    """
    Make sure that RPMs belonging to this assembly/group are consistent with the assembly definition.
    """
    for rpm_meta in runtime.rpm_metas():
        issues = assembly_inspector.check_group_rpm_package_consistency(
            rpm_meta)
        assembly_issues.extend(issues)
    """
    If this is a stream assembly, images which are not using the latest builds should not reach
    the release controller. Other assemblies are meant to be constructed from non-latest.
    """
    if runtime.assembly == 'stream':
        for dgk, build_inspector in assembly_inspector.get_group_release_images().items():
            if build_inspector:
                non_latest_rpm_nvrs = build_inspector.find_non_latest_rpms()
                dgk = build_inspector.get_image_meta().distgit_key
                for installed_nvr, newest_nvr in non_latest_rpm_nvrs:
                    # This indicates an issue with scan-sources or that an image is no longer successfully building.
                    # Impermissible as this speaks to a potentially deeper issue of images not being rebuilt
                    outdated_issue = AssemblyIssue(
                        f'Found outdated RPM ({installed_nvr}) installed in {build_inspector.get_nvr()} when {newest_nvr} was available',
                        component=dgk,
                        code=AssemblyIssueCode.OUTDATED_RPMS_IN_STREAM_BUILD)
                    assembly_issues.append(
                        outdated_issue)  # Add to overall issues
    """
    Make sure image build selected by this assembly/group are consistent with the assembly definition.
    """
    for dgk, bbii in assembly_inspector.get_group_release_images().items():
        if bbii:
            issues = assembly_inspector.check_group_image_consistency(bbii)
            assembly_issues.extend(issues)

    for arch in runtime.arches:
        if arch in exclude_arch:
            logger.info(f'Excluding payload files architecture: {arch}')
            continue

        # Whether private or public, the assembly's canonical payload content is the same.
        # Key of this dict is release payload tag name.
        entries: Dict[str, PayloadGenerator.PayloadEntry] = PayloadGenerator.find_payload_entries(
            assembly_inspector, arch, f'quay.io/{organization}/{repository}')

        for tag, payload_entry in entries.items():
            if payload_entry.image_meta:
                # We already stored inconsistencies for each image_meta; look them up if there are any.
                payload_entry.issues.extend(
                    filter(lambda ai: ai.component == payload_entry.image_meta.distgit_key, assembly_issues))
            elif payload_entry.rhcos_build:
                assembly_issues.extend(assembly_inspector.check_rhcos_issues(payload_entry.rhcos_build))
                payload_entry.issues.extend(filter(lambda ai: ai.component == 'rhcos', assembly_issues))
                if runtime.assembly == 'stream':
                    # For stream alone, we want to enforce that the very latest RPMs are installed.
                    non_latest_rpm_nvrs = payload_entry.rhcos_build.find_non_latest_rpms()
                    for installed_nvr, newest_nvr in non_latest_rpm_nvrs:
                        assembly_issues.append(AssemblyIssue(
                            f'Found outdated RPM ({installed_nvr}) installed in {payload_entry.rhcos_build} when {newest_nvr} is available',
                            component='rhcos',
                            code=AssemblyIssueCode.OUTDATED_RPMS_IN_STREAM_BUILD))
            else:
                raise IOError(f'Unsupported PayloadEntry: {payload_entry}')

        # Save the default SRC=DEST input to a file for syncing by 'oc image mirror'. Why is
        # there no '-priv'? The true images for the assembly are what we are syncing -
        # it is what we update in the imagestream that defines whether the image will be
        # part of a public release.
        dests: Set[str] = set()  # Prevents writing the same destination twice (not supported by oc)
        with io.open(f"src_dest.{arch}", "w+", encoding="utf-8") as out_file:
            for payload_entry in entries.values():
                if not payload_entry.archive_inspector:
                    # Nothing to mirror (e.g. machine-os-content)
                    continue
                if payload_entry.dest_pullspec in dests:
                    # Don't write the same destination twice.
                    continue
                out_file.write(
                    f"{payload_entry.archive_inspector.get_archive_pullspec()}={payload_entry.dest_pullspec}\n"
                )
                dests.add(payload_entry.dest_pullspec)

        for private_mode in privacy_modes:
            logger.info(
                f'Building payload files for architecture: {arch}; private: {private_mode}'
            )

            file_suffix = arch + '-priv' if private_mode else arch
            with io.open(f"image_stream.{file_suffix}.yaml",
                         "w+",
                         encoding="utf-8") as out_file:
                istags: List[Dict] = []
                for payload_tag_name, payload_entry in entries.items():
                    if payload_entry.build_inspector and payload_entry.build_inspector.is_under_embargo() and private_mode is False:
                        # Don't send this istag update to the public release controller
                        continue
                    istags.append(
                        PayloadGenerator.build_payload_istag(
                            payload_tag_name, payload_entry))

                imagestream_name, imagestream_namespace = payload_imagestream_name_and_namespace(
                    base_imagestream_name, base_istream_namespace, arch,
                    private_mode)

                istream_spec = PayloadGenerator.build_payload_imagestream(
                    imagestream_name, imagestream_namespace, istags,
                    assembly_issues)
                yaml.safe_dump(istream_spec,
                               out_file,
                               indent=2,
                               default_flow_style=False)

    # Now make sure that all of the RHCOS builds contain consistent RPMs
    for private_mode in privacy_modes:
        rhcos_builds = targeted_rhcos_builds[private_mode]
        rhcos_inconsistencies: Dict[str, List[str]] = PayloadGenerator.find_rhcos_build_rpm_inconsistencies(rhcos_builds)
        if rhcos_inconsistencies:
            assembly_issues.append(
                AssemblyIssue(
                    f'Found RHCOS inconsistencies in builds {targeted_rhcos_builds}: {rhcos_inconsistencies}',
                    component='rhcos',
                    code=AssemblyIssueCode.INCONSISTENT_RHCOS_RPMS))

    # If the assembly claims to have reference nightlies, assert that our payload
    # matches them exactly.
    nightly_match_issues = PayloadGenerator.check_nightlies_consistency(
        assembly_inspector)
    if nightly_match_issues:
        assembly_issues.extend(nightly_match_issues)

    assembly_issues_report: Dict[str, List[Dict]] = dict()
    report['assembly_issues'] = assembly_issues_report

    overall_permitted = True
    for ai in assembly_issues:
        permitted = assembly_inspector.does_permit(ai)
        overall_permitted &= permitted  # If anything is not permitted, exit with an error
        assembly_issues_report.setdefault(ai.component, []).append({
            'code': ai.code.name,
            'msg': ai.msg,
            'permitted': permitted
        })

    report['viable'] = overall_permitted

    print(yaml.dump(report, default_flow_style=False, indent=2))
    if not overall_permitted:
        red_print(
            'DO NOT PROCEED WITH THIS ASSEMBLY PAYLOAD -- not all detected issues are permitted.',
            file=sys.stderr)
        if not emergency_ignore_issues:
            exit(1)
    exit(0)
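The permitting loop at the end is a simple fold: every issue must be individually permitted for the payload to stay viable. A compact sketch with stubbed issues and an invented stand-in permit policy (the real decision comes from assembly_inspector.does_permit):

from types import SimpleNamespace

issues = [
    SimpleNamespace(component="rhcos", code=SimpleNamespace(name="INCONSISTENT_RHCOS_RPMS"), msg="..."),
    SimpleNamespace(component="cli", code=SimpleNamespace(name="OUTDATED_RPMS_IN_STREAM_BUILD"), msg="..."),
]
permitted_codes = {"OUTDATED_RPMS_IN_STREAM_BUILD"}  # invented stand-in policy

overall_permitted = True
report = {}
for ai in issues:
    permitted = ai.code.name in permitted_codes
    overall_permitted &= permitted
    report.setdefault(ai.component, []).append(
        {"code": ai.code.name, "msg": ai.msg, "permitted": permitted})
print(overall_permitted)  # False -> do not proceed with this payload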
Example No. 9
    def get_group_payload_tag_mapping(
            assembly_inspector: AssemblyInspector,
            arch: str) -> Dict[str, Optional[ArchiveImageInspector]]:
        """
        Each payload tag name used to map exactly to one release imagemeta. With the advent of '-alt' images,
        we need some logic to determine which images map to which payload tags for a given architecture.
        :return: Returns a map[payload_tag_name] -> ArchiveImageInspector containing an image for the payload. The value may be
                 None if there is no arch specific build for the tag. This does not include machine-os-content since that
                 is not a member of the group.
        """
        brew_arch = brew_arch_for_go_arch(arch)  # Make certain this is brew arch nomenclature
        # Maps release payload tag name to the archive which should populate it
        members: Dict[str, Optional[ArchiveImageInspector]] = dict()
        for dgk, build_inspector in assembly_inspector.get_group_release_images().items():

            if build_inspector is None:
                # There was no build for this image found associated with the assembly.
                # In this case, don't put the tag_name into the imagestream. This is not good,
                # so be verbose.
                red_print(
                    f'Unable to find build for {dgk} for {assembly_inspector.get_assembly_name()}',
                    file=sys.stderr)
                continue

            image_meta: ImageMetadata = assembly_inspector.runtime.image_map[dgk]

            if not image_meta.is_payload:
                # Nothing to do for images which are not in the payload
                continue

            tag_name, explicit = image_meta.get_payload_tag_info()  # The tag that will be used in the imagestreams and whether it was explicitly declared.

            if arch not in image_meta.get_arches():
                # If this image is not meant for this architecture
                if tag_name not in members:
                    members[tag_name] = None  # We still need a placeholder in the tag mapping
                continue

            if members.get(tag_name, None) and not explicit:
                # If we have already found an entry, there is a precedence we honor for
                # "-alt" images. Specifically, if a imagemeta declares its payload tag
                # name explicitly, it will take precedence over any other entries
                # https://issues.redhat.com/browse/ART-2823
                # This was tag not explicitly declared, so ignore the duplicate image.
                continue

            archive_inspector = build_inspector.get_image_archive_inspector(brew_arch)

            if not archive_inspector:
                # There is no build for this CPU architecture for this image_meta/build. This finding
                # conflicts with the `arch not in image_meta.get_arches()` check above.
                # Best to fail.
                raise IOError(
                    f'{dgk} claims to be built for {image_meta.get_arches()} but did not find {brew_arch} build for {build_inspector.get_brew_build_webpage_url()}'
                )

            members[tag_name] = archive_inspector

        return members
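The '-alt' precedence rule reduces to: an explicit tag declaration overwrites whatever is present, while an implicit one never displaces an existing entry. In isolation, with invented names:

# Invented (image, payload_tag, explicit) tuples demonstrating the precedence rule.
candidates = [
    ("driver-toolkit", "driver-toolkit", False),     # implicit, encountered first
    ("driver-toolkit-alt", "driver-toolkit", True),  # explicit, takes precedence
]
members = {}
for image_name, tag_name, explicit in candidates:
    if members.get(tag_name) and not explicit:
        continue  # implicit duplicate: keep the existing entry
    members[tag_name] = image_name
print(members)  # {'driver-toolkit': 'driver-toolkit-alt'}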
Example No. 10
def release_gen_payload(runtime, is_name, is_namespace, organization, repository, event_id):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files are SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files are YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes this command generates mirroring YAML files
after the arch-specific files have been generated. The YAML files
list the names of the generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.
    """
    runtime.initialize(clone_distgits=False, config_excludes='non_release')
    orgrepo = "{}/{}".format(organization, repository)
    cmd = runtime.command
    runtime.state[cmd] = dict(state.TEMPLATE_IMAGE)
    lstate = runtime.state[cmd]  # get local convenience copy

    if not is_name:
        is_name = default_is_base_name(runtime.get_minor_version())
    if not is_namespace:
        is_namespace = default_is_base_namespace()

    images = [i for i in runtime.image_metas()]
    lstate['total'] = len(images)

    no_build_items = []
    invalid_name_items = []

    payload_images = []
    for image in images:
        # Per clayton:
        """Tim Bielawa: note to self: is only for `ose-` prefixed images
        Clayton Coleman: Yes, Get with the naming system or get out of town
        """
        if image.is_payload:
            if not image.image_name_short.startswith("ose-"):
                invalid_name_items.append(image.image_name_short)
                red_print("NOT adding to IS (does not meet name/version conventions): {}".format(image.image_name_short))
                continue
            else:
                payload_images.append(image)

    runtime.logger.info("Fetching latest image builds from Brew...")
    tag_component_tuples = [(image.candidate_brew_tag(), image.get_component_name()) for image in payload_images]
    brew_session = runtime.build_retrying_koji_client()
    latest_builds = brew.get_latest_builds(tag_component_tuples, "image", event_id, brew_session)
    latest_builds = [builds[0] if builds else None for builds in latest_builds]  # flatten the data structure

    runtime.logger.info("Fetching image archives...")
    build_ids = [b["id"] if b else 0 for b in latest_builds]
    archives_list = brew.list_archives_by_builds(build_ids, "image", brew_session)

    mismatched_siblings = find_mismatched_siblings(payload_images, latest_builds, archives_list, runtime.logger, lstate)

    embargoed_build_ids = set()  # a set of private image build ids
    if runtime.group_config.public_upstreams:
        # looking for embargoed image builds
        detector = embargo_detector.EmbargoDetector(brew_session, runtime.logger)
        for index, archive_list in enumerate(archives_list):
            if build_ids[index]:
                detector.archive_lists[build_ids[index]] = archive_list  # store to EmbargoDetector cache to limit Brew queries
        suspects = [b for b in latest_builds if b]
        embargoed_build_ids = detector.find_embargoed_builds(suspects)

    runtime.logger.info("Creating mirroring lists...")

    # These will map[arch] -> map[image_name] -> { version: version, release: release, image_src: image_src }
    mirroring = {}
    for i, image in enumerate(payload_images):
        latest_build = latest_builds[i]
        archives = archives_list[i]
        error = None
        if image.distgit_key in mismatched_siblings:
            error = "Siblings built from different commits"
        elif not (latest_build and archives):  # build or archive doesn't exist
            error = f"Unable to find build for: {image.image_name_short}"
            no_build_items.append(image.image_name_short)
        else:
            for archive in archives:
                arch = archive["arch"]
                pullspecs = archive["extra"]["docker"]["repositories"]
                if not pullspecs or ":" not in pullspecs[-1]:  # in case of no pullspecs or invalid format
                    error = f"Unable to find pullspecs for: {image.image_name_short}"
                    red_print(error, file=sys.stderr)
                    state.record_image_fail(lstate, image, error, runtime.logger)
                    break
                # The tag that will be used in the imagestreams
                tag_name = image.image_name_short
                tag_name = tag_name[4:] if tag_name.startswith("ose-") else tag_name  # it _should_ but... to be safe
                digest = archive["extra"]['docker']['digests']['application/vnd.docker.distribution.manifest.v2+json']
                if not digest.startswith("sha256:"):  # It should start with sha256: for now. Let's raise an error if this changes.
                    raise ValueError(f"Received unrecognized digest {digest} for image {pullspecs[-1]}")
                mirroring_value = {'version': latest_build["version"], 'release': latest_build["release"], 'image_src': pullspecs[-1], 'digest': digest}
                embargoed = latest_build["id"] in embargoed_build_ids  # when public_upstreams are not configured, this is always false
                if not embargoed:  # exclude embargoed images from the ocp[-arch] imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the public mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(arch, {})[tag_name] = mirroring_value
                else:
                    red_print(f"Found embargoed image {pullspecs[-1]}")
                if runtime.group_config.public_upstreams:
                    # when public_upstreams are configured, both embargoed and non-embargoed images should be included in the ocp[-arch]-priv imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the private mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(f"{arch}-priv", {})[tag_name] = mirroring_value
        if not error:
            state.record_image_success(lstate, image)
        else:
            red_print(error, file=sys.stderr)
            state.record_image_fail(lstate, image, error, runtime.logger)

    for key in mirroring:
        private = key.endswith("-priv")
        arch = key[:-5] if private else key  # strip `-priv` suffix

        mirror_filename = 'src_dest.{}'.format(key)
        imagestream_filename = 'image_stream.{}'.format(key)
        target_is_name, target_is_namespace = is_name_and_space(is_name, is_namespace, arch, private)

        def build_dest_name(tag_name):
            entry = mirroring[key][tag_name]
            tag = entry["digest"].replace(":", "-")  # sha256:abcdef -> sha256-abcdef
            return f"quay.io/{orgrepo}:{tag}"

        # Save the default SRC=DEST 'oc image mirror' input to a file for
        # later.
        with io.open(mirror_filename, 'w+', encoding="utf-8") as out_file:
            for tag_name in mirroring[key]:
                dest = build_dest_name(tag_name)
                out_file.write("{}={}\n".format(mirroring[key][tag_name]['image_src'], dest))

        with io.open("{}.yaml".format(imagestream_filename), 'w+', encoding="utf-8") as out_file:
            # Add a tag spec to the image stream. The name of each tag
            # spec does not include the 'ose-' prefix. This keeps them
            # consistent between OKD and OCP

            # Template Base Image Stream object.
            tag_list = []
            isb = {
                'kind': 'ImageStream',
                'apiVersion': 'image.openshift.io/v1',
                'metadata': {
                    'name': target_is_name,
                    'namespace': target_is_namespace,
                },
                'spec': {
                    'tags': tag_list,
                }
            }

            for tag_name in mirroring[key]:
                tag_list.append({
                    'name': tag_name,
                    'from': {
                        'kind': 'DockerImage',
                        'name': build_dest_name(tag_name)
                    }
                })

            # mirroring rhcos
            runtime.logger.info(f"Getting latest RHCOS pullspec for {target_is_name}...")
            mosc_istag = _latest_mosc_istag(runtime, arch, private)
            if mosc_istag:
                tag_list.append(mosc_istag)

            # Not all images are built for non-x86 arches (e.g. kuryr), but they
            # may be mentioned in image references. Thus, make sure there is a tag
            # for every tag we find in x86_64 and provide just a dummy image.
            if 'cli' not in mirroring[key]:  # `cli` serves as the dummy image for the replacement
                if runtime.group_config.public_upstreams and not private:  # If cli is embargoed, it is expected that cli is missing in any non *-priv imagestreams.
                    runtime.logger.warning(f"Unable to find cli tag from {key} imagestream. Is `cli` image embargoed?")
                else:  # if CVE embargoes supporting is disabled or the "cli" image is also missing in *-priv namespaces, an error will be raised.
                    raise DoozerFatalError('A dummy image is required for tag {} on arch {}, but unable to find cli tag for this arch'.format(tag_name, arch))
            else:
                extra_tags = mirroring['x86_64-priv' if private else 'x86_64'].keys() - mirroring[key].keys()
                for tag_name in extra_tags:
                    yellow_print('Unable to find tag {} for arch {} ; substituting cli image'.format(tag_name, arch))
                    tag_list.append({
                        'name': tag_name,
                        'from': {
                            'kind': 'DockerImage',
                            'name': build_dest_name('cli')  # cli is always built and is harmless
                        }
                    })

            yaml.safe_dump(isb, out_file, indent=2, default_flow_style=False)

    if no_build_items:
        yellow_print("No builds found for:")
        for img in sorted(no_build_items):
            click.echo("   {}".format(img))

    if invalid_name_items:
        yellow_print("Images skipped due to invalid naming:")
        for img in sorted(invalid_name_items):
            click.echo("   {}".format(img))

    if mismatched_siblings:
        yellow_print("Images skipped due to siblings mismatch:")
        for img in sorted(mismatched_siblings):
            click.echo("   {}".format(img))
Example No. 11
async def get_nightlies(runtime, matching: List[str], exclude_arch: List[str],
                        allow_pending: bool, allow_rejected: bool, limit: str,
                        details: bool, latest: bool):
    """
    Find set(s) including a nightly for each arch with matching contents
    according to source commits and NVRs (or in the case of RHCOS containers,
    RPM content).

    \b
    By default:
    * only one set of nightlies (the most recent) is displayed (see --limit)
    * only accepted nightlies will be examined (see --allow-pending/rejected)
    * all arches configured for the group will be required (see --exclude-arch)

    You may also specify a desired nightly or nightlies (see --matching) to filter
    results to only sets that include the matched nightlies in their respective
    arch(es). Examples:

     \b
     $ doozer -q -g openshift-4.8 get-nightlies --limit 3
     4.8.0-0.nightly-s390x-2022-07-19-121001 4.8.0-0.nightly-ppc64le-2022-07-19-120922 4.8.0-0.nightly-2022-07-19-120845
     4.8.0-0.nightly-s390x-2022-07-19-001435 4.8.0-0.nightly-ppc64le-2022-07-19-001401 4.8.0-0.nightly-2022-07-19-001308
     4.8.0-0.nightly-s390x-2022-07-15-184648 4.8.0-0.nightly-ppc64le-2022-07-15-184608 4.8.0-0.nightly-2022-07-15-190253

     \b
     Match one preferred nightly:
     $ doozer -q -g openshift-4.8 get-nightlies --matching 4.8.0-0.nightly-2022-07-19-001308
     4.8.0-0.nightly-s390x-2022-07-19-001435 4.8.0-0.nightly-ppc64le-2022-07-19-001401 4.8.0-0.nightly-2022-07-19-001308
     \b
     Match nightlies in multiple arches:
     $ doozer -q -g openshift-4.8 get-nightlies --matching 4.8.0-0.nightly-2022-07-19-001308 --matching 4.8.0-0.nightly-ppc64le-2022-07-19-120922
     No sets of equivalent nightlies found for given parameters.
     \b
     Match two nightlies in same arch:
     $ doozer -q -g openshift-4.8 get-nightlies --limit 3 --matching 4.8.0-0.nightly-2022-07-19-001308 --matching 4.8.0-0.nightly-2022-07-15-190253
     4.8.0-0.nightly-s390x-2022-07-19-001435 4.8.0-0.nightly-ppc64le-2022-07-19-001401 4.8.0-0.nightly-2022-07-19-001308
     4.8.0-0.nightly-s390x-2022-07-15-184648 4.8.0-0.nightly-ppc64le-2022-07-15-184608 4.8.0-0.nightly-2022-07-15-190253

    All matches specified must exist (to guard against typo/mis-paste) with the correct state:

     \b
     $ doozer -q -g openshift-4.8 get-nightlies --matching 4.8.0-that-exists-not
     Found no nightlies in state {'Accepted'} matching {'4.8.0-that-exists-not'}

    If results do not include a nightly that you expect to see, check the
    doozer debug.log where details about equivalence failures are logged.
    Matching is performed in two phases:

      * The first uses only info from the nightly release images for quick
        comparison in order to construct candidate equivalent sets of nightlies.
      * The second retrieves image info for all payload content in order to
        compare group image NVRs and RHCOS RPM content.
    """
    # parameter validation/processing
    limit = int(limit)
    if latest and limit > 1:
        raise ValueError("Don't use --latest and --limit > 1")
    if limit < 1:
        raise ValueError("--limit must be a positive integer")
    if latest:
        allow_pending = True
        allow_rejected = True
    runtime.initialize(clone_distgits=False)
    include_arches: Set[str] = determine_arch_list(runtime, set(exclude_arch))

    # make lists of nightly objects per arch
    try:
        nightlies_for_arch: Dict[str, List[Nightly]] = {
            arch: [Nightly(nightly_info=n) for n in nightlies]
            for arch, nightlies in
            find_rc_nightlies(runtime, include_arches, allow_pending,
                              allow_rejected, matching).items()
        }
    except NoMatchingNightlyException as ex:
        util.red_print(ex)
        exit(1)

    # retrieve release info for each nightly image (with concurrency)
    await asyncio.gather(*[
        nightly.populate_nightly_release_data()
        for arch, nightlies in nightlies_for_arch.items()
        for nightly in nightlies
    ])

    # find sets of nightlies where all arches have equivalent content
    nightly_sets = []
    for nightly_set in generate_nightly_sets(nightlies_for_arch):
        # check for deeper equivalence
        await nightly_set.populate_nightly_content(runtime)
        if nightly_set.deeper_equivalence():
            nightly_sets.append(nightly_set)
            util.green_print(nightly_set.details() if details else nightly_set)
            if len(nightly_sets) >= limit:
                break  # don't spend time checking more than were requested

    if not nightly_sets:
        util.red_print(
            "No sets of equivalent nightlies found for given parameters.")
        exit(1)
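The two-phase matching described in the docstring can be pictured as cheap bucketing first, expensive verification second. A toy sketch of the first phase, with invented nightly names and an invented equivalence key:

from collections import defaultdict

nightlies_for_arch = {
    "x86_64": [{"name": "4.8.0-0.nightly-2022-07-19-120845", "key": "c0ffee"}],
    "s390x": [{"name": "4.8.0-0.nightly-s390x-2022-07-19-121001", "key": "c0ffee"}],
}
buckets = defaultdict(dict)  # equivalence key -> {arch: nightly name}
for arch, nightlies in nightlies_for_arch.items():
    for n in nightlies:
        buckets[n["key"]][arch] = n["name"]
# Candidate sets are buckets that cover every required arch; phase two would
# then verify NVR/RPM-level equivalence for each candidate set.
candidate_sets = [s for s in buckets.values() if len(s) == len(nightlies_for_arch)]
print(candidate_sets)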