Code example #1
0
    def _get_latest_builds(self, payload_images):
        """
        Find the latest brew build (at event, if given) of each payload image.
        :param payload_images: a list of image metadata for payload images
        :return: list of build records, list of images missing builds
        """
        tag_component_pairs = [
            (img.candidate_brew_tag(), img.get_component_name())
            for img in payload_images
        ]
        builds_per_image = brew.get_latest_builds(tag_component_pairs, "image", self.brew_event, self.brew_session)
        # each entry holds zero or one "latest" build; flatten to one dict (possibly empty) per image
        flattened_builds = [entry[0] if entry else {} for entry in builds_per_image]

        # look up the archives for each image (to get the RPMs that went into them)
        build_ids = [b["id"] if b else 0 for b in flattened_builds]
        archives_per_build = brew.list_archives_by_builds(build_ids, "image", self.brew_session)

        # payload_images, flattened_builds, and archives_per_build line up index-by-index;
        # combine them into dict build records.
        found, missing = [], []
        for image, build, archives in zip(payload_images, flattened_builds, archives_per_build):
            if not (build and archives):
                missing.append(image)
                state.record_image_fail(self.state, image, f"Unable to find build for: {image.image_name_short}", self.runtime.logger)
            else:
                found.append(BuildRecord(image, build, archives))

        self.state["builds_missing"] = len(missing)
        self.state["builds_found"] = len(found)
        return found, missing
Code example #2
0
    def find_embargoed_builds(self, builds: List[Dict]) -> Set[int]:
        """ find embargoes in given list of koji builds
        :param builds: a list of koji build dicts returned by the koji api
        :return: a set of build IDs that have embargoed fixes
        """
        # first, exclude all shipped builds
        if self.logger:
            self.logger.info("Filtering out shipped builds...")
        shipped = self.find_shipped_builds([build["id"] for build in builds])
        unshipped = [build for build in builds if build["id"] not in shipped]

        # second, a build whose release field includes .p1 is embargoed
        embargoed = {build["id"] for build in unshipped if ".p1" in build["release"]}

        # finally, look at the rpms in .p0 images in case they include unshipped .p1 rpms
        p0_build_ids = {build["id"] for build in unshipped} - embargoed  # non .p1 build IDs

        # fetch archives for any build IDs missing from the self.archive_lists cache
        build_ids = list(p0_build_ids - self.archive_lists.keys())
        if build_ids:
            if self.logger:
                self.logger.info(f"Fetching image archives for {len(build_ids)} builds...")
            # if a build is not an image (e.g. rpm), Brew will return an empty archive list for that build
            fetched = brew.list_archives_by_builds(build_ids, "image", self.koji_session)
            for fetched_id, archive_list in zip(build_ids, fetched):
                self.archive_lists[fetched_id] = archive_list  # save to cache

        suspect_archives = [
            archive
            for p0_id in p0_build_ids
            for archive in self.archive_lists[p0_id]
        ]

        if self.logger:
            self.logger.info(f'Fetching rpms in {len(suspect_archives)} images...')
        rpm_lists = brew.list_image_rpms([archive["id"] for archive in suspect_archives], self.koji_session)
        for archive, rpms in zip(suspect_archives, rpm_lists):
            # there should be a better way to check the release field...
            p1_rpms = [rpm for rpm in rpms if ".p1" in rpm["release"]]
            shipped_rpm_builds = self.find_shipped_builds([rpm["build_id"] for rpm in p1_rpms])
            if any(rpm["build_id"] not in shipped_rpm_builds for rpm in p1_rpms):
                embargoed.add(archive["build_id"])
        return embargoed
Code example #3
0
 def populate_archive_lists(self, suspect_build_ids: Set[int]):
     """ populate self.archive_lists with any build IDs not already cached
     :param suspect_build_ids: a list of koji build ids
     """
     # Only update cache with missing builds
     build_ids = list(suspect_build_ids - self.archive_lists.keys())
     if not build_ids:
         return
     if self.logger:
         self.logger.info(f"Fetching image archives for {len(build_ids)} builds...")
     # if a build is not an image (e.g. rpm), Brew will return an empty archive list for that build
     fetched = brew.list_archives_by_builds(build_ids, "image", self.koji_session)
     self.archive_lists.update(zip(build_ids, fetched))  # save to cache
Code example #4
0
    def find_embargoed_builds(self, builds: List[Dict]) -> Set[int]:
        """ find embargoes in given list of koji builds
        :param builds: a list of koji build dicts returned by the koji api
        :return: a set of build IDs that have embargoed fixes
        """
        # first, exclude all shipped builds
        if self.logger:
            self.logger.info("Filtering out shipped builds...")
        shipped = self.find_shipped_builds([build["id"] for build in builds])
        unshipped = [build for build in builds if build["id"] not in shipped]

        # second, a build whose release field includes .p1 is embargoed
        embargoed = {build["id"] for build in unshipped if ".p1" in build["release"]}

        # finally, look at the rpms in .p0 images in case they include unshipped .p1 rpms
        p0_build_ids = {build["id"] for build in unshipped} - embargoed  # non .p1 build IDs

        # look up any build IDs not already in self.archive_lists cache
        build_ids = list(p0_build_ids - self.archive_lists.keys())
        if build_ids:
            if self.logger:
                self.logger.info(f"Fetching image archives for {len(build_ids)} builds...")
            # if a build is not an image (e.g. rpm), Brew will return an empty archive list for that build
            fetched = brew.list_archives_by_builds(build_ids, "image", self.koji_session)
            self.archive_lists.update(zip(build_ids, fetched))  # save to cache

        # look for embargoed RPMs in the image archives (one per arch for every image)
        for p0_id in p0_build_ids:
            for archive in self.archive_lists[p0_id]:
                # there should be a better way to check the release field...
                p1_rpms = [rpm for rpm in archive["rpms"] if ".p1" in rpm["release"]]
                shipped_rpm_builds = self.find_shipped_builds([rpm["build_id"] for rpm in p1_rpms])
                if any(rpm["build_id"] not in shipped_rpm_builds for rpm in p1_rpms):
                    embargoed.add(archive["build_id"])

        return embargoed
Code example #5
0
    def from_images(self, image_map: Dict[str, ImageMetadata]) -> Dict[str, List[Dict]]:
        """ Returns RPM builds used in images
        :param image_map: Map of image_distgit_key -> ImageMetadata
        :return: a dict; keys are image distgit keys, values are lists of RPM build dicts
        """
        # keys are image distgit keys, values are brew build dicts
        image_builds: Dict[str, Dict] = OrderedDict()
        # rpms in images; keys are image distgit keys, values are rpm build dicts used in that image
        image_rpm_builds: Dict[str, List[Dict]] = OrderedDict()

        self._logger.info("Finding image builds...")
        for distgit_key, image_meta in image_map.items():
            latest = image_meta.get_latest_build(default=None, honor_is=False)
            # Ignore None in case we build for a basis event that is prior to the first build of an image
            if latest:
                image_builds[distgit_key] = latest

        self._logger.info("Finding RPMs used in %s image builds...", len(image_builds))
        archive_lists = list_archives_by_builds(
            [build["build_id"] for build in image_builds.values()], "image", self._koji_api)

        rpm_build_ids = {
            rpm["build_id"]
            for archives in archive_lists
            for archive in archives
            for rpm in archive["rpms"]
        }
        self._logger.info("Querying Brew build infos for %s RPM builds...", len(rpm_build_ids))
        # Maps rpm_build_id to build object from brew
        build_map = {build["build_id"]: build for build in self._get_builds(rpm_build_ids)}

        for distgit_key, archives in zip(image_builds.keys(), archive_lists):
            ids_in_image = {rpm["build_id"] for archive in archives for rpm in archive["rpms"]}
            image_rpm_builds[distgit_key] = [build_map[build_id] for build_id in ids_in_image]
        return image_rpm_builds
Code example #6
0
    def test_list_archives_by_builds(self):
        # Falsy build IDs (None, 0) must map to None in the result; real IDs
        # map to a one-element archive list.
        build_ids = [1, 2, 3, None, 4, 0, 5, None]

        def make_archive(build_id):
            return {"build_id": build_id, "type_name": "tar", "arch": "x86_64", "btype": "image", "id": build_id * 1000000}

        expected = [[make_archive(n)] if n else None for n in build_ids]

        def fake_response(buildID, type):
            return mock.MagicMock(result=[{"build_id": buildID, "type_name": "tar", "arch": "x86_64", "btype": type, "id": buildID * 1000000}])

        fake_session = mock.MagicMock()
        multicall_cm = fake_session.multicall.return_value.__enter__.return_value
        multicall_cm.listArchives.side_effect = fake_response
        actual = brew.list_archives_by_builds(build_ids, "image", fake_session)
        self.assertListEqual(actual, expected)
Code example #7
0
def gen_assembly_from_releases(ctx, runtime, nightlies, standards, custom):
    """
    Build an assembly definition from the specified nightly and/or standard
    releases and print it to stdout as YAML.

    The function estimates a brew basis event that would sweep the builds found
    in the given releases, then pins (via 'is:') any image/RPM whose NVR the
    estimated event would not reproduce.

    :param ctx: click context; ctx.obj['ASSEMBLY_NAME'] names the output assembly
    :param runtime: doozer runtime; must currently be the 'stream' assembly
    :param nightlies: nightly release names (one per group arch unless custom)
    :param standards: standard release names like '4.7.22-s390x'
    :param custom: True for custom assemblies (fewer arches allowed, no advisories)
    """
    runtime.initialize(mode='both',
                       clone_distgits=False,
                       clone_source=False,
                       prevent_cloning=True)
    logger = runtime.logger
    gen_assembly_name = ctx.obj[
        'ASSEMBLY_NAME']  # The name of the assembly we are going to output

    # Create a map of package_name to RPMMetadata
    package_rpm_meta: Dict[str, RPMMetadata] = {
        rpm_meta.get_package_name(): rpm_meta
        for rpm_meta in runtime.rpm_metas()
    }

    def exit_with_error(msg):
        # Print to stderr and terminate the process with a non-zero status.
        print(msg, file=sys.stderr)
        exit(1)

    if runtime.assembly != 'stream':
        exit_with_error(
            '--assembly must be "stream" in order to populate an assembly definition from nightlies'
        )

    if not nightlies and not standards:
        exit_with_error(
            'At least one release (--nightly or --standard) must be specified')

    if len(runtime.arches) != len(nightlies) + len(standards) and not custom:
        exit_with_error(
            f'Expected at least {len(runtime.arches)} nightlies; one for each group arch: {runtime.arches}'
        )

    reference_releases_by_arch: Dict[
        str, str] = dict()  # Maps brew arch name to nightly name
    mosc_by_arch: Dict[str, str] = dict(
    )  # Maps brew arch name to machine-os-content pullspec from nightly
    component_image_builds: Dict[str, BrewBuildImageInspector] = dict(
    )  # Maps component package_name to brew build dict found for nightly
    component_rpm_builds: Dict[str, Dict[int, Dict]] = dict(
    )  # Dict[ package_name ] -> Dict[ el? ] -> brew build dict
    basis_event_ts: float = 0.0

    # Collect one pullspec per brew arch from the nightly/standard release names;
    # duplicate arches across the two input lists are an error.
    release_pullspecs: Dict[str, str] = dict()
    for nightly_name in nightlies:
        major_minor, brew_cpu_arch, priv = util.isolate_nightly_name_components(
            nightly_name)
        if major_minor != runtime.get_minor_version():
            exit_with_error(
                f'Specified nightly {nightly_name} does not match group major.minor'
            )
        reference_releases_by_arch[brew_cpu_arch] = nightly_name
        rc_suffix = util.go_suffix_for_arch(brew_cpu_arch, priv)
        nightly_pullspec = f'registry.ci.openshift.org/ocp{rc_suffix}/release{rc_suffix}:{nightly_name}'
        if brew_cpu_arch in release_pullspecs:
            raise ValueError(
                f'Cannot process {nightly_name} since {release_pullspecs[brew_cpu_arch]} is already included'
            )
        release_pullspecs[brew_cpu_arch] = nightly_pullspec

    for standard_release_name in standards:
        version, brew_cpu_arch = standard_release_name.split(
            '-')  # 4.7.22-s390x => ['4.7.22', 's390x']
        major_minor = '.'.join(
            version.split('.')[:2]
        )  # isolate just x.y from version names like '4.77.22' and '4.8.0-rc.3'
        if major_minor != runtime.get_minor_version():
            exit_with_error(
                f'Specified release {standard_release_name} does not match group major.minor'
            )
        standard_pullspec = f'quay.io/openshift-release-dev/ocp-release:{standard_release_name}'
        if brew_cpu_arch in release_pullspecs:
            raise ValueError(
                f'Cannot process {standard_release_name} since {release_pullspecs[brew_cpu_arch]} is already included'
            )
        release_pullspecs[brew_cpu_arch] = standard_pullspec

    # Inspect each release's imagestream tags to find the brew build backing each
    # payload component, and accumulate the latest completion timestamp.
    for brew_cpu_arch, pullspec in release_pullspecs.items():
        runtime.logger.info(f'Processing release: {pullspec}')

        release_json_str, _ = exectools.cmd_assert(
            f'oc adm release info {pullspec} -o=json', retries=3)
        release_info = Model(dict_to_model=json.loads(release_json_str))

        if not release_info.references.spec.tags:
            exit_with_error(
                f'Could not find any imagestream tags in release: {pullspec}')

        for component_tag in release_info.references.spec.tags:
            payload_tag_name = component_tag.name  # e.g. "aws-ebs-csi-driver"
            payload_tag_pullspec = component_tag['from'].name  # quay pullspec

            if payload_tag_name == 'machine-os-content':
                mosc_by_arch[brew_cpu_arch] = payload_tag_pullspec
                continue

            # The brew_build_inspector will take this archive image and find the actual
            # brew build which created it.
            brew_build_inspector = BrewBuildImageInspector(
                runtime, payload_tag_pullspec)
            package_name = brew_build_inspector.get_package_name()
            build_nvr = brew_build_inspector.get_nvr()
            if package_name in component_image_builds:
                # If we have already encountered this package once in the list of releases we are
                # processing, then make sure that the original NVR we found matches the new NVR.
                # We want the releases to be populated with identical builds.
                existing_nvr = component_image_builds[package_name].get_nvr()
                if build_nvr != existing_nvr:
                    exit_with_error(
                        f'Found disparate nvrs between releases; {existing_nvr} in processed and {build_nvr} in {pullspec}'
                    )
            else:
                # Otherwise, record the build as the first time we've seen an NVR for this
                # package.
                component_image_builds[package_name] = brew_build_inspector

            # We now try to determine a basis brew event that will
            # find this image during get_latest_build-like operations
            # for the assembly. At the time of this writing, metadata.get_latest_build
            # will only look for builds *completed* before the basis event. This could
            # be changed to *created* before the basis event in the future. However,
            # other logic that is used to find latest builds requires the build to be
            # tagged into an rhaos tag before the basis brew event.
            # To choose a safe / reliable basis brew event, we first find the
            # time at which a build was completed, then add 5 minutes.
            # That extra 5 minutes ensures brew will have had time to tag the
            # build appropriately for its build target. The 5 minutes is also
            # short enough to ensure that no other build of this image could have
            # completed before the basis event.

            completion_ts: float = brew_build_inspector.get_brew_build_dict(
            )['completion_ts']
            # If the basis event for this image is > the basis_event capable of
            # sweeping images we've already analyzed, increase the basis_event_ts.
            basis_event_ts = max(basis_event_ts, completion_ts + (60.0 * 5))

    # basis_event_ts should now be greater than the build completion / target tagging operation
    # for any (non machine-os-content) image in the nightlies. Because images are built after RPMs,
    # it must also hold that the basis_event_ts is also greater than build completion & tagging
    # of any member RPM.

    # Let's now turn the approximate basis_event_ts into a brew event number
    with runtime.shared_koji_client_session() as koji_api:
        basis_event = koji_api.getLastEvent(before=basis_event_ts)['id']

    logger.info(f'Estimated basis brew event: {basis_event}')
    logger.info(
        f'The following image package_names were detected in the specified releases: {component_image_builds.keys()}'
    )

    # That said, things happen. Let's say image component X was built in build X1 and X2.
    # Image component Y was build in Y1. Let's say that the ordering was X1, X2, Y1 and, for
    # whatever reason, we find X1 and Y1 in the user specified nightly. This means the basis_event_ts
    # we find for Y1 is going to find X2 instead of X1 if we used it as part of an assembly's basis event.

    # To avoid that, we now evaluate whether any images or RPMs defy our assumption that the nightly
    # corresponds to the basis_event_ts we have calculated. If we find something that will not be swept
    # correctly by the estimated basis event, we collect up the outliers (hopefully few in number) into
    # a list of packages which must be included in the assembly as 'is:'. This might happen if, for example,
    # an artist accidentally builds an image on the command line for the stream assembly; without this logic,
    # that build might be found by our basis event, but we will explicitly pin to the image in the nightly
    # component's NVR as an override in the assembly definition.
    force_is: Set[str] = set(
    )  # A set of package_names whose NVRs are not correctly sourced by the estimated basis_event
    for image_meta in runtime.image_metas():

        if image_meta.base_only or not image_meta.for_release:
            continue

        dgk = image_meta.distgit_key
        package_name = image_meta.get_component_name()
        basis_event_dict = image_meta.get_latest_build(
            default=None, complete_before_event=basis_event)
        if not basis_event_dict:
            exit_with_error(
                f'No image was found for assembly {runtime.assembly} for component {dgk} at estimated brew event {basis_event}. No normal reason for this to happen so exiting out of caution.'
            )

        basis_event_build_dict: BrewBuildImageInspector = BrewBuildImageInspector(
            runtime, basis_event_dict['id'])
        basis_event_build_nvr = basis_event_build_dict.get_nvr()

        if not image_meta.is_payload:
            # If this is not for the payload, the nightlies cannot have informed our NVR decision; just
            # pick whatever the estimated basis will pull and let the user know. If they want to change
            # it, they will need to pin it.
            logger.info(
                f'{dgk} non-payload build {basis_event_build_nvr} will be swept by estimated assembly basis event'
            )
            component_image_builds[package_name] = basis_event_build_dict
            continue

        # Otherwise, the image_meta is destined for the payload and analyzing the nightlies should
        # have given us an NVR which is expected to be selected by the assembly.

        if package_name not in component_image_builds:
            if custom:
                logger.warning(
                    f'Unable to find {dgk} in releases despite it being marked as is_payload in ART metadata; this may be because the image is not built for every arch or it is not labeled appropriately for the payload. Choosing what was in the estimated basis event sweep: {basis_event_build_nvr}'
                )
            else:
                logger.error(
                    f'Unable to find {dgk} in releases despite it being marked as is_payload in ART metadata; this may mean the image does not have the proper labeling for being in the payload. Choosing what was in the estimated basis event sweep: {basis_event_build_nvr}'
                )
            component_image_builds[package_name] = basis_event_build_dict
            continue

        ref_releases_component_build = component_image_builds[package_name]
        ref_nightlies_component_build_nvr = ref_releases_component_build.get_nvr(
        )

        if basis_event_build_nvr != ref_nightlies_component_build_nvr:
            logger.info(
                f'{dgk} build {basis_event_build_nvr} was selected by estimated basis event. That is not what is in the specified releases, so this image will be pinned.'
            )
            force_is.add(package_name)
            continue

        # Otherwise, the estimated basis event resolved the image nvr we found in the nightlies. The
        # image NVR does not need to be pinned. Yeah!
        pass

    # We should have found a machine-os-content for each architecture in the group for a standard assembly
    for arch in runtime.arches:
        if arch not in mosc_by_arch:
            if custom:
                # This is permitted for custom assemblies which do not need to be assembled for every
                # architecture. The customer may just need x86_64.
                logger.info(
                    f'Did not find machine-os-content image for active group architecture: {arch}; ignoring since this is custom.'
                )
            else:
                exit_with_error(
                    f'Did not find machine-os-content image for active group architecture: {arch}'
                )

    # We now have a list of image builds that should be selected by the assembly basis event
    # and those that will need to be forced with 'is'. We now need to perform a similar step
    # for RPMs. Look at the image contents, see which RPMs are in use. If we build them,
    # then the NVRs in the image must be selected by the estimated basis event. If they are
    # not, then we must pin the NVRs in the assembly definition.

    with runtime.shared_koji_client_session() as koji_api:

        archive_lists = brew.list_archives_by_builds(
            [b.get_brew_build_id() for b in component_image_builds.values()],
            "image", koji_api)
        rpm_build_ids = {
            rpm["build_id"]
            for archives in archive_lists for ar in archives
            for rpm in ar["rpms"]
        }
        logger.info("Querying Brew build information for %s RPM builds...",
                    len(rpm_build_ids))
        # We now have a list of all RPM builds which have been installed into the various images which
        # ART builds. Specifically the ART builds which went into composing the nightlies.
        ref_releases_rpm_builds: List[Dict] = brew.get_build_objects(
            rpm_build_ids, koji_api)

        for ref_releases_rpm_build in ref_releases_rpm_builds:
            package_name = ref_releases_rpm_build['package_name']
            if package_name in package_rpm_meta:  # Does ART build this package?
                rpm_meta = package_rpm_meta[package_name]
                dgk = rpm_meta.distgit_key
                rpm_build_nvr = ref_releases_rpm_build['nvr']
                # If so, what RHEL version is this build for?
                el_ver = util.isolate_el_version_in_release(
                    ref_releases_rpm_build['release'])
                if not el_ver:
                    exit_with_error(
                        f'Unable to isolate el? version in {rpm_build_nvr}')

                if package_name not in component_rpm_builds:
                    # If this is the first time we've seen this ART package, bootstrap a dict for its
                    # potentially different builds for different RHEL versions.
                    # NOTE(review): the annotation on this subscript assignment is ignored at
                    # runtime (PEP 526); a plain assignment would be equivalent.
                    component_rpm_builds[package_name]: Dict[int,
                                                             Dict] = dict()

                if el_ver in component_rpm_builds[package_name]:
                    # We've already captured the build in our results
                    continue

                # Now it is time to see whether a query for the RPM from the basis event
                # estimate comes up with this RPM NVR.
                basis_event_build_dict = rpm_meta.get_latest_build(
                    el_target=el_ver, complete_before_event=basis_event)
                if not basis_event_build_dict:
                    exit_with_error(
                        f'No RPM was found for assembly {runtime.assembly} for component {dgk} at estimated brew event {basis_event}. No normal reason for this to happen so exiting out of caution.'
                    )

                if el_ver in component_rpm_builds[package_name]:
                    # We've already logged a build for this el version before
                    # NOTE(review): this check appears redundant — the identical check above
                    # already continues, and nothing adds this el_ver in between.
                    continue

                component_rpm_builds[package_name][
                    el_ver] = ref_releases_rpm_build
                basis_event_build_nvr = basis_event_build_dict['nvr']
                logger.info(
                    f'{dgk} build {basis_event_build_nvr} selected by scan against estimated basis event'
                )
                if basis_event_build_nvr != ref_releases_rpm_build['nvr']:
                    # The basis event estimate did not find the RPM from the nightlies. We have to pin the package.
                    logger.info(
                        f'{dgk} build {basis_event_build_nvr} was selected by estimated basis event. That is not what is in the specified releases, so this RPM will be pinned.'
                    )
                    force_is.add(package_name)

    # component_image_builds now contains a mapping of package_name -> BrewBuildImageInspector for all images that should be included
    # in the assembly.
    # component_rpm_builds now contains a mapping of package_name to different RHEL versions that should be included
    # in the assembly.
    # force_is is a set of package_names which were not successfully selected by the estimated basis event.

    # Translate the pinned packages into member override entries for the assembly YAML.
    image_member_overrides: List[Dict] = []
    rpm_member_overrides: List[Dict] = []
    for package_name in force_is:
        if package_name in component_image_builds:
            build_inspector: BrewBuildImageInspector = component_image_builds[
                package_name]
            dgk = build_inspector.get_image_meta().distgit_key
            image_member_overrides.append({
                'distgit_key': dgk,
                'why':
                'Query from assembly basis event failed to replicate referenced nightly content exactly. Pinning to replicate.',
                'metadata': {
                    'is': {
                        'nvr': build_inspector.get_nvr()
                    }
                }
            })
        elif package_name in component_rpm_builds:
            dgk = package_rpm_meta[package_name].distgit_key
            rpm_member_overrides.append({
                'distgit_key': dgk,
                'why':
                'Query from assembly basis event failed to replicate referenced nightly content exactly. Pinning to replicate.',
                'metadata': {
                    'is': {
                        f'el{el_ver}':
                        component_rpm_builds[package_name][el_ver]['nvr']
                        for el_ver in component_rpm_builds[package_name]
                    }
                }
            })

    group_info = {}
    if not custom:
        group_info['advisories'] = {
            'image': -1,
            'rpm': -1,
            'extras': -1,
            'metadata': -1,
        }
    else:
        # Custom payloads don't require advisories.
        # If the user has specified fewer nightlies than is required by this
        # group, then we need to override the group arches.
        group_info = {'arches!': list(mosc_by_arch.keys())}

    # Assemble the final definition and emit it as YAML on stdout.
    assembly_def = {
        'releases': {
            gen_assembly_name: {
                "assembly": {
                    'type': 'custom' if custom else 'standard',
                    'basis': {
                        'brew_event': basis_event,
                        'reference_releases': reference_releases_by_arch,
                    },
                    'group': group_info,
                    'rhcos': {
                        'machine-os-content': {
                            "images": mosc_by_arch,
                        }
                    },
                    'members': {
                        'rpms': rpm_member_overrides,
                        'images': image_member_overrides,
                    }
                }
            }
        }
    }

    print(yaml.dump(assembly_def))
Code example #8
0
def release_gen_payload(runtime, is_name, is_namespace, organization, repository, event_id):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files are SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files are YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes this command generates a mirroring yaml files
after the arch-specific files have been generated. The yaml files
include names of generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.
    """
    runtime.initialize(clone_distgits=False, config_excludes='non_release')
    orgrepo = "{}/{}".format(organization, repository)
    cmd = runtime.command
    runtime.state[cmd] = dict(state.TEMPLATE_IMAGE)
    lstate = runtime.state[cmd]  # get local convenience copy

    if not is_name:
        is_name = default_is_base_name(runtime.get_minor_version())
    if not is_namespace:
        is_namespace = default_is_base_namespace()

    images = [i for i in runtime.image_metas()]
    lstate['total'] = len(images)

    no_build_items = []  # image short names with no usable Brew build/archives
    invalid_name_items = []  # image short names rejected by naming convention

    # Filter down to payload images that follow the required `ose-` prefix.
    payload_images = []
    for image in images:
        # Per clayton:
        """Tim Bielawa: note to self: is only for `ose-` prefixed images
        Clayton Coleman: Yes, Get with the naming system or get out of town
        """
        if image.is_payload:
            if not image.image_name_short.startswith("ose-"):
                invalid_name_items.append(image.image_name_short)
                red_print("NOT adding to IS (does not meet name/version conventions): {}".format(image.image_name_short))
                continue
            else:
                payload_images.append(image)

    runtime.logger.info("Fetching latest image builds from Brew...")
    tag_component_tuples = [(image.candidate_brew_tag(), image.get_component_name()) for image in payload_images]
    brew_session = runtime.build_retrying_koji_client()
    latest_builds = brew.get_latest_builds(tag_component_tuples, "image", event_id, brew_session)
    # There is zero or one "latest" build per component; flatten to build-or-None.
    latest_builds = [builds[0] if builds else None for builds in latest_builds]

    runtime.logger.info("Fetching image archives...")
    build_ids = [b["id"] if b else 0 for b in latest_builds]
    archives_list = brew.list_archives_by_builds(build_ids, "image", brew_session)

    # payload_images, latest_builds and archives_list are parallel lists from here on.
    mismatched_siblings = find_mismatched_siblings(payload_images, latest_builds, archives_list, runtime.logger, lstate)

    embargoed_build_ids = set()  # a set of private image build ids
    if runtime.group_config.public_upstreams:
        # looking for embargoed image builds
        detector = embargo_detector.EmbargoDetector(brew_session, runtime.logger)
        for index, archive_list in enumerate(archives_list):
            if build_ids[index]:
                detector.archive_lists[build_ids[index]] = archive_list  # store to EmbargoDetector cache to limit Brew queries
        suspects = [b for b in latest_builds if b]
        embargoed_build_ids = detector.find_embargoed_builds(suspects)

    runtime.logger.info("Creating mirroring lists...")

    # These will map[arch] -> map[image_name] -> { version: version, release: release, image_src: image_src }
    mirroring = {}
    for i, image in enumerate(payload_images):
        latest_build = latest_builds[i]
        archives = archives_list[i]
        error = None
        if image.distgit_key in mismatched_siblings:
            error = "Siblings built from different commits"
        elif not (latest_build and archives):  # build or archive doesn't exist
            error = f"Unable to find build for: {image.image_name_short}"
            no_build_items.append(image.image_name_short)
        else:
            for archive in archives:
                arch = archive["arch"]
                pullspecs = archive["extra"]["docker"]["repositories"]
                if not pullspecs or ":" not in pullspecs[-1]:  # in case of no pullspecs or invalid format
                    error = f"Unable to find pullspecs for: {image.image_name_short}"
                    red_print(error, file=sys.stderr)
                    state.record_image_fail(lstate, image, error, runtime.logger)
                    break
                # The tag that will be used in the imagestreams
                tag_name = image.image_name_short
                tag_name = tag_name[4:] if tag_name.startswith("ose-") else tag_name  # it _should_ but... to be safe
                digest = archive["extra"]['docker']['digests']['application/vnd.docker.distribution.manifest.v2+json']
                if not digest.startswith("sha256:"):  # It should start with sha256: for now. Let's raise an error if this changes.
                    raise ValueError(f"Received unrecognized digest {digest} for image {pullspecs[-1]}")
                mirroring_value = {'version': latest_build["version"], 'release': latest_build["release"], 'image_src': pullspecs[-1], 'digest': digest}
                embargoed = latest_build["id"] in embargoed_build_ids  # when public_upstreams are not configured, this is always false
                if not embargoed:  # exclude embargoed images from the ocp[-arch] imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the public mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(arch, {})[tag_name] = mirroring_value
                else:
                    red_print(f"Found embargoed image {pullspecs[-1]}")
                if runtime.group_config.public_upstreams:
                    # when public_upstreams are configured, both embargoed and non-embargoed images should be included in the ocp[-arch]-priv imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the private mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(f"{arch}-priv", {})[tag_name] = mirroring_value
        if not error:
            state.record_image_success(lstate, image)
        else:
            red_print(error, file=sys.stderr)
            state.record_image_fail(lstate, image, error, runtime.logger)

    # Emit one mirror file + one imagestream YAML per (arch, privacy) bucket.
    for key in mirroring:
        private = key.endswith("-priv")
        arch = key[:-5] if private else key  # strip `-priv` suffix

        mirror_filename = 'src_dest.{}'.format(key)
        imagestream_filename = 'image_stream.{}'.format(key)
        target_is_name, target_is_namespace = is_name_and_space(is_name, is_namespace, arch, private)

        def build_dest_name(tag_name):
            # Destination pullspec in quay; digest doubles as a unique tag.
            entry = mirroring[key][tag_name]
            tag = entry["digest"].replace(":", "-")  # sha256:abcdef -> sha256-abcdef
            return f"quay.io/{orgrepo}:{tag}"

        # Save the default SRC=DEST 'oc image mirror' input to a file for
        # later.
        with io.open(mirror_filename, 'w+', encoding="utf-8") as out_file:
            for tag_name in mirroring[key]:
                dest = build_dest_name(tag_name)
                out_file.write("{}={}\n".format(mirroring[key][tag_name]['image_src'], dest))

        with io.open("{}.yaml".format(imagestream_filename), 'w+', encoding="utf-8") as out_file:
            # Add a tag spec to the image stream. The name of each tag
            # spec does not include the 'ose-' prefix. This keeps them
            # consistent between OKD and OCP

            # Template Base Image Stream object.
            tag_list = []
            isb = {
                'kind': 'ImageStream',
                'apiVersion': 'image.openshift.io/v1',
                'metadata': {
                    'name': target_is_name,
                    'namespace': target_is_namespace,
                },
                'spec': {
                    'tags': tag_list,
                }
            }

            for tag_name in mirroring[key]:
                tag_list.append({
                    'name': tag_name,
                    'from': {
                        'kind': 'DockerImage',
                        'name': build_dest_name(tag_name)
                    }
                })

            # mirroring rhcos
            runtime.logger.info(f"Getting latest RHCOS pullspec for {target_is_name}...")
            mosc_istag = _latest_mosc_istag(runtime, arch, private)
            if mosc_istag:
                tag_list.append(mosc_istag)

            # Not all images are built for non-x86 arches (e.g. kuryr), but they
            # may be mentioned in image references. Thus, make sure there is a tag
            # for every tag we find in x86_64 and provide just a dummy image.
            if 'cli' not in mirroring[key]:  # `cli` serves as the dummy image for the replacement
                if runtime.group_config.public_upstreams and not private:  # If cli is embargoed, it is expected that cli is missing in any non *-priv imagestreams.
                    runtime.logger.warning(f"Unable to find cli tag from {key} imagestream. Is `cli` image embargoed?")
                else:  # if CVE embargoes supporting is disabled or the "cli" image is also missing in *-priv namespaces, an error will be raised.
                    # NOTE(review): `tag_name` here is the leftover value from the
                    # loop above, not a specific missing tag — the message may name
                    # an arbitrary tag. Confirm intended wording before changing.
                    raise DoozerFatalError('A dummy image is required for tag {} on arch {}, but unable to find cli tag for this arch'.format(tag_name, arch))
            else:
                extra_tags = mirroring['x86_64-priv' if private else 'x86_64'].keys() - mirroring[key].keys()
                for tag_name in extra_tags:
                    yellow_print('Unable to find tag {} for arch {} ; substituting cli image'.format(tag_name, arch))
                    tag_list.append({
                        'name': tag_name,
                        'from': {
                            'kind': 'DockerImage',
                            'name': build_dest_name('cli')  # cli is always built and is harmless
                        }
                    })

            yaml.safe_dump(isb, out_file, indent=2, default_flow_style=False)

    # Summary reports for the operator.
    if no_build_items:
        yellow_print("No builds found for:")
        for img in sorted(no_build_items):
            click.echo("   {}".format(img))

    if invalid_name_items:
        yellow_print("Images skipped due to invalid naming:")
        for img in sorted(invalid_name_items):
            click.echo("   {}".format(img))

    if mismatched_siblings:
        yellow_print("Images skipped due to siblings mismatch:")
        # BUGFIX: previously iterated `invalid_name_items` (copy-paste from the
        # block above), printing the wrong list under this heading.
        for img in sorted(mismatched_siblings):
            click.echo("   {}".format(img))