    def _get_latest_builds(self, payload_images):
        """
        find latest brew build (at event, if given) of each payload image.
        :param payload_images: a list of image metadata for payload images
        :return: list of build records, list of images missing builds
        """
        tag_component_tuples = [(image.candidate_brew_tag(), image.get_component_name()) for image in payload_images]
        brew_latest_builds = brew.get_latest_builds(tag_component_tuples, "image", self.brew_event, self.brew_session)
        # there's zero or one "latest" build in each list; flatten the data structure.
        brew_latest_builds = [builds[0] if builds else {} for builds in brew_latest_builds]

        # look up the archives for each image (to get the RPMs that went into them)
        brew_build_ids = [b["id"] if b else 0 for b in brew_latest_builds]
        archives_list = brew.list_archives_by_builds(brew_build_ids, "image", self.brew_session)

        # at this point payload_images, brew_latest_builds, and archives_list should be matching lists;
        # combine them into dict build records.
        latest_builds, missing_images = [], []
        for image, build, archives in zip(payload_images, brew_latest_builds, archives_list):
            if build and archives:
                latest_builds.append(BuildRecord(image, build, archives))
            else:
                missing_images.append(image)
                state.record_image_fail(self.state, image, f"Unable to find build for: {image.image_name_short}", self.runtime.logger)

        self.state["builds_missing"] = len(missing_images)
        self.state["builds_found"] = len(latest_builds)
        return latest_builds, missing_images
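
The flattening above depends on brew.get_latest_builds returning exactly one result list per (tag, component) tuple, in input order. A minimal usage sketch of that contract, with hypothetical tag and component names and an already-configured koji session:

# Hypothetical tuples for illustration; each input tuple yields one result list.
tuples = [
    ("rhaos-4.2-rhel-7-candidate", "openshift-enterprise-cli-container"),
    ("rhaos-4.2-rhel-7-candidate", "no-such-component"),
]
results = brew.get_latest_builds(tuples, "image", None, brew_session)
# results might look like: [[{"id": 123, "nvr": "..."}], []]
latest = [builds[0] if builds else {} for builds in results]  # zero or one "latest" per tag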
Example #2
    def test_get_latest_builds(self):
        tag_component_tuples = [
            ("faketag1", "component1"),
            ("faketag2", "component2"),
            ("faketag2", None),
            ("faketag1", "component4"),
            ("", "component5"),
            ("faketag2", "component6"),
        ]
        expected = [
            [{"name": "component1", "nvr": "component1-v1.0.0-1.faketag1"}],
            [{"name": "component2", "nvr": "component2-v1.0.0-1.faketag2"}],
            [{"name": "a", "nvr": "a-v1.0.0-1.faketag2"}, {"name": "b", "nvr": "b-v1.0.0-1.faketag2"}],
            [{"name": "component4", "nvr": "component4-v1.0.0-1.faketag1"}],
            None,
            [{"name": "component6", "nvr": "component6-v1.0.0-1.faketag2"}],
        ]

        def fake_response(tag, event=None, package=None, type=None):
            packages = [package] if package else ["a", "b"]
            return mock.MagicMock(result=[{"name": pkg, "nvr": f"{pkg}-v1.0.0-1.{tag}"} for pkg in packages])

        fake_session = mock.MagicMock()
        fake_context_manager = fake_session.multicall.return_value.__enter__.return_value
        fake_context_manager.getLatestBuilds.side_effect = fake_response
        actual = brew.get_latest_builds(tag_component_tuples, None, None, fake_session)
        self.assertListEqual(actual, expected)
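
The MagicMock wiring above mirrors koji's client-side multicall: getLatestBuilds calls are queued inside the context manager, and each queued call's .result resolves once the block exits. A rough sketch of the wrapper this test implies, assuming that multicall usage and that empty tags are skipped (which would explain the None expected for ("", "component5")):

def get_latest_builds_sketch(tag_component_tuples, build_type, event, session):
    with session.multicall(strict=True) as m:
        tasks = [
            m.getLatestBuilds(tag, event=event, package=component, type=build_type)
            if tag else None  # an empty tag produces None, matching the test's expectations
            for tag, component in tag_component_tuples
        ]
    return [task.result if task else None for task in tasks]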
Example #3
    def assert_golang_versions(self):
        """ Assert all buildroots have consistent versions of golang compilers
        """
        check_mode = self.runtime.group_config.check_golang_versions or "x.y"  # no: do not check; x.y: only major and minor version; exact: the z-version must be the same
        if check_mode == "no":
            return

        # populate target_golangs with information from Brew
        with RPMMetadata.target_golangs_lock:
            uncached_targets = set(self.targets) - RPMMetadata.target_golangs.keys()
        if uncached_targets:
            uncached_targets = list(uncached_targets)
            self.logger.debug(f"Querying golang compiler versions for targets {uncached_targets}...")
            brew_session = self.runtime.build_retrying_koji_client()
            # get buildroots for uncached targets
            with brew_session.multicall(strict=True) as m:
                tasks = [m.getBuildTarget(target) for target in uncached_targets]
            buildroots = [task.result["build_tag_name"] for task in tasks]
            # get latest build of golang compiler for each buildroot
            golang_components = ["golang", "golang-scl-shim"]
            for target, buildroot in zip(uncached_targets, buildroots):
                latest_builds = brew.get_latest_builds([(buildroot, component) for component in golang_components], "rpm", None, brew_session)
                latest_builds = [builds[0] for builds in latest_builds if builds]  # flatten latest_builds
                # It is possible that a buildroot has multiple golang compiler packages (golang and golang-scl-shim) tagged in.
                # We need to find the maximum version in each buildroot.
                max_golang_nevr = None
                for build in latest_builds:
                    nevr = (build["name"], build["epoch"], build["version"], build["release"])
                    if max_golang_nevr is None or rpm.labelCompare(nevr[1:], max_golang_nevr[1:]) > 0:
                        max_golang_nevr = nevr
                if max_golang_nevr is None:
                    raise DoozerFatalError(f"Buildroot {buildroot} doesn't contain any golang compiler packages.")
                if max_golang_nevr[0] == "golang-scl-shim":
                    # golang-scl-shim is not an actual compiler but an adaptor to make go-toolset look like golang for an RPM build.
                    # We need to check the actual go-toolset build it requires.
                    # See https://source.redhat.com/groups/public/atomicopenshift/atomicopenshift_wiki/what_art_needs_to_know_about_golang#jive_content_id_golangsclshim
                    major, minor = max_golang_nevr[2].split(".")[:2]
                    go_toolset_builds = brew_session.getLatestBuilds(buildroot, package=f"go-toolset-{major}.{minor}", type="rpm")
                    if not go_toolset_builds:
                        raise DoozerFatalError(f"Buildroot {buildroot} doesn't have go-toolset-{major}.{minor} tagged in.")
                    max_golang_nevr = (go_toolset_builds[0]["name"], go_toolset_builds[0]["epoch"], go_toolset_builds[0]["version"], go_toolset_builds[0]["release"])
                with RPMMetadata.target_golangs_lock:
                    RPMMetadata.target_golangs[target] = max_golang_nevr

        # assert all buildroots have the same version of golang compilers
        it = iter(self.targets)
        first_target = next(it)
        with RPMMetadata.target_golangs_lock:
            first_nevr = RPMMetadata.target_golangs[first_target]
            for target in it:
                nevr = RPMMetadata.target_golangs[target]
                if (check_mode == "exact" and nevr[2] != first_nevr[2]) or (check_mode == "x.y" and nevr[2].split(".")[:2] != first_nevr[2].split(".")[:2]):
                    raise DoozerFatalError(f"Buildroot for target {target} has inconsistent golang compiler version {nevr[2]} while target {first_target} has {first_nevr[2]}.")
Example #4
def detect_embargoes_in_tags(runtime: Runtime, kind: str,
                             included_tags: List[str],
                             excluded_tags: List[str],
                             event_id: Optional[int]):
    """ Finds embargoes in builds with given tags
    :param runtime: the runtime
    :param included_tags: list of koji tags that the returned builds must have
    :param excluded_tags: list of koji tags that the returned builds must not have
    :return: list of Brew build dicts that have embargoed fixes
    """
    brew_session = runtime.build_retrying_koji_client()
    runtime.logger.info(f"Fetching builds from Brew tags {included_tags}...")
    build_type = None if kind == "all" else kind
    latest_build_lists = brew.get_latest_builds([(tag, None)
                                                 for tag in included_tags],
                                                build_type, event_id,
                                                brew_session)
    included_builds = [
        b for builds in latest_build_lists if builds for b in builds
    ]  # flatten latest_build_lists
    runtime.logger.info(
        f"Found {len(included_builds)} builds from Brew tags {included_tags}.")
    if included_builds and excluded_tags:  # if we have tags to exclude, get all builds with excluded_tags then exclude them
        runtime.logger.info(
            f"Fetching builds from Brew tags {excluded_tags}...")
        excluded_build_lists = brew.get_tagged_builds(
            [(tag, None) for tag in excluded_tags], build_type, event_id,
            brew_session)
        excluded_build_ids = {
            b["id"]
            for builds in excluded_build_lists if builds for b in builds
        }
        builds = [
            b for b in included_builds if b["id"] not in excluded_build_ids
        ]
        runtime.logger.info(
            f"Excluded {len(included_builds) - len(builds)} builds that are also tagged into {excluded_tags}."
        )
        included_builds = builds

    # Builds may have duplicate entries if we query from multiple tags. Don't worry, BuildStatusDetector is smart.
    runtime.logger.info(
        f"Detecting embargoes for {len(included_builds)} builds...")
    detector = bs_detector.BuildStatusDetector(runtime, runtime.logger)
    embargoed_build_ids = detector.find_embargoed_builds(
        included_builds, runtime.get_candidate_brew_tags())
    embargoed_builds = [
        b for b in included_builds if b["id"] in embargoed_build_ids
    ]
    return embargoed_builds
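
The exclusion step reduces to a set difference on build IDs; a toy illustration of the same pattern:

included = [{"id": 1, "nvr": "foo-1.0-1"}, {"id": 2, "nvr": "bar-1.0-1"}]
excluded_build_ids = {2}
kept = [b for b in included if b["id"] not in excluded_build_ids]
# kept == [{"id": 1, "nvr": "foo-1.0-1"}]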
Example #5
def release_gen_payload(runtime, is_name, is_namespace, organization, repository, event_id):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files are SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files are YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes, this command generates mirroring YAML files
after the arch-specific files have been generated. The YAML files
include the names of the generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.
    """
    runtime.initialize(clone_distgits=False, config_excludes='non_release')
    orgrepo = "{}/{}".format(organization, repository)
    cmd = runtime.command
    runtime.state[cmd] = dict(state.TEMPLATE_IMAGE)
    lstate = runtime.state[cmd]  # get local convenience copy

    if not is_name:
        is_name = default_is_base_name(runtime.get_minor_version())
    if not is_namespace:
        is_namespace = default_is_base_namespace()

    images = [i for i in runtime.image_metas()]
    lstate['total'] = len(images)

    no_build_items = []
    invalid_name_items = []

    payload_images = []
    for image in images:
        # Per clayton:
        """Tim Bielawa: note to self: is only for `ose-` prefixed images
        Clayton Coleman: Yes, Get with the naming system or get out of town
        """
        if image.is_payload:
            if not image.image_name_short.startswith("ose-"):
                invalid_name_items.append(image.image_name_short)
                red_print("NOT adding to IS (does not meet name/version conventions): {}".format(image.image_name_short))
                continue
            else:
                payload_images.append(image)

    runtime.logger.info("Fetching latest image builds from Brew...")
    tag_component_tuples = [(image.candidate_brew_tag(), image.get_component_name()) for image in payload_images]
    brew_session = runtime.build_retrying_koji_client()
    latest_builds = brew.get_latest_builds(tag_component_tuples, "image", event_id, brew_session)
    latest_builds = [builds[0] if builds else None for builds in latest_builds]  # flatten the data structure

    runtime.logger.info("Fetching image archives...")
    build_ids = [b["id"] if b else 0 for b in latest_builds]
    archives_list = brew.list_archives_by_builds(build_ids, "image", brew_session)

    mismatched_siblings = find_mismatched_siblings(payload_images, latest_builds, archives_list, runtime.logger, lstate)

    embargoed_build_ids = set()  # a set of private image build ids
    if runtime.group_config.public_upstreams:
        # looking for embargoed image builds
        detector = embargo_detector.EmbargoDetector(brew_session, runtime.logger)
        for index, archive_list in enumerate(archives_list):
            if build_ids[index]:
                detector.archive_lists[build_ids[index]] = archive_list  # store to EmbargoDetector cache to limit Brew queries
        suspects = [b for b in latest_builds if b]
        embargoed_build_ids = detector.find_embargoed_builds(suspects)

    runtime.logger.info("Creating mirroring lists...")

    # These will map[arch] -> map[image_name] -> { version: version, release: release, image_src: image_src }
    mirroring = {}
    for i, image in enumerate(payload_images):
        latest_build = latest_builds[i]
        archives = archives_list[i]
        error = None
        if image.distgit_key in mismatched_siblings:
            error = "Siblings built from different commits"
        elif not (latest_build and archives):  # build or archive doesn't exist
            error = f"Unable to find build for: {image.image_name_short}"
            no_build_items.append(image.image_name_short)
        else:
            for archive in archives:
                arch = archive["arch"]
                pullspecs = archive["extra"]["docker"]["repositories"]
                if not pullspecs or ":" not in pullspecs[-1]:  # in case of no pullspecs or invalid format
                    error = f"Unable to find pullspecs for: {image.image_name_short}"
                    red_print(error, file=sys.stderr)
                    state.record_image_fail(lstate, image, error, runtime.logger)
                    break
                # The tag that will be used in the imagestreams
                tag_name = image.image_name_short
                tag_name = tag_name[4:] if tag_name.startswith("ose-") else tag_name  # it _should_ but... to be safe
                digest = archive["extra"]['docker']['digests']['application/vnd.docker.distribution.manifest.v2+json']
                if not digest.startswith("sha256:"):  # It should start with sha256: for now. Let's raise an error if this changes.
                    raise ValueError(f"Received unrecognized digest {digest} for image {pullspecs[-1]}")
                mirroring_value = {'version': latest_build["version"], 'release': latest_build["release"], 'image_src': pullspecs[-1], 'digest': digest}
                embargoed = latest_build["id"] in embargoed_build_ids  # when public_upstreams are not configured, this is always false
                if not embargoed:  # exclude embargoed images from the ocp[-arch] imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the public mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(arch, {})[tag_name] = mirroring_value
                else:
                    red_print(f"Found embargoed image {pullspecs[-1]}")
                if runtime.group_config.public_upstreams:
                    # when public_upstreams are configured, both embargoed and non-embargoed images should be included in the ocp[-arch]-priv imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the private mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(f"{arch}-priv", {})[tag_name] = mirroring_value
        if not error:
            state.record_image_success(lstate, image)
        else:
            red_print(error, file=sys.stderr)
            state.record_image_fail(lstate, image, error, runtime.logger)

    for key in mirroring:
        private = key.endswith("-priv")
        arch = key[:-5] if private else key  # strip `-priv` suffix
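        # e.g. "s390x-priv" -> private=True, arch="s390x"; "x86_64" -> private=False, arch="x86_64"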

        mirror_filename = 'src_dest.{}'.format(key)
        imagestream_filename = 'image_stream.{}'.format(key)
        target_is_name, target_is_namespace = is_name_and_space(is_name, is_namespace, arch, private)

        def build_dest_name(tag_name):
            entry = mirroring[key][tag_name]
            tag = entry["digest"].replace(":", "-")  # sha256:abcdef -> sha256-abcdef
            return f"quay.io/{orgrepo}:{tag}"

        # Save the default SRC=DEST 'oc image mirror' input to a file for
        # later.
        with io.open(mirror_filename, 'w+', encoding="utf-8") as out_file:
            for tag_name in mirroring[key]:
                dest = build_dest_name(tag_name)
                out_file.write("{}={}\n".format(mirroring[key][tag_name]['image_src'], dest))

        with io.open("{}.yaml".format(imagestream_filename), 'w+', encoding="utf-8") as out_file:
            # Add a tag spec to the image stream. The name of each tag
            # spec does not include the 'ose-' prefix. This keeps them
            # consistent between OKD and OCP

            # Template Base Image Stream object.
            tag_list = []
            isb = {
                'kind': 'ImageStream',
                'apiVersion': 'image.openshift.io/v1',
                'metadata': {
                    'name': target_is_name,
                    'namespace': target_is_namespace,
                },
                'spec': {
                    'tags': tag_list,
                }
            }

            for tag_name in mirroring[key]:
                tag_list.append({
                    'name': tag_name,
                    'from': {
                        'kind': 'DockerImage',
                        'name': build_dest_name(tag_name)
                    }
                })

            # mirroring rhcos
            runtime.logger.info(f"Getting latest RHCOS pullspec for {target_is_name}...")
            mosc_istag = _latest_mosc_istag(runtime, arch, private)
            if mosc_istag:
                tag_list.append(mosc_istag)

            # Not all images are built for non-x86 arches (e.g. kuryr), but they
            # may be mentioned in image references. Thus, make sure there is a tag
            # for every tag we find in x86_64 and provide just a dummy image.
            if 'cli' not in mirroring[key]:  # `cli` serves as the dummy image for the replacement
                if runtime.group_config.public_upstreams and not private:  # If cli is embargoed, it is expected that cli is missing in any non *-priv imagestreams.
                    runtime.logger.warning(f"Unable to find cli tag from {key} imagestream. Is `cli` image embargoed?")
                else:  # if CVE embargoes supporting is disabled or the "cli" image is also missing in *-priv namespaces, an error will be raised.
                    raise DoozerFatalError('A dummy image is required for missing tags on arch {}, but unable to find cli tag for this arch'.format(arch))
            else:
                extra_tags = mirroring['x86_64-priv' if private else 'x86_64'].keys() - mirroring[key].keys()
                for tag_name in extra_tags:
                    yellow_print('Unable to find tag {} for arch {} ; substituting cli image'.format(tag_name, arch))
                    tag_list.append({
                        'name': tag_name,
                        'from': {
                            'kind': 'DockerImage',
                            'name': build_dest_name('cli')  # cli is always built and is harmless
                        }
                    })

            yaml.safe_dump(isb, out_file, indent=2, default_flow_style=False)

    if no_build_items:
        yellow_print("No builds found for:")
        for img in sorted(no_build_items):
            click.echo("   {}".format(img))

    if invalid_name_items:
        yellow_print("Images skipped due to invalid naming:")
        for img in sorted(invalid_name_items):
            click.echo("   {}".format(img))

    if mismatched_siblings:
        yellow_print("Images skipped due to siblings mismatch:")
        for img in sorted(mismatched_siblings):
            click.echo("   {}".format(img))
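
For reference, the two generated file sets follow the shapes constructed above. A hypothetical excerpt (the registry host, ORGANIZATION/REPOSITORY, namespace, and digest values here are illustrative, not real):

# src_dest.x86_64 -- one SRC=DEST line per tag, consumed by `oc image mirror`:
registry-proxy.example.com/rh-osbs/openshift-ose-cli:v4.2.0-1=quay.io/myorg/myrepo:sha256-abc123

# image_stream.x86_64.yaml -- the `isb` dict serialized by yaml.safe_dump:
kind: ImageStream
apiVersion: image.openshift.io/v1
metadata:
  name: 4.2-art-latest
  namespace: ocp        # illustrative; the default comes from default_is_base_namespace()
spec:
  tags:
  - name: cli
    from:
      kind: DockerImage
      name: quay.io/myorg/myrepo:sha256-abc123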