Example #1
    def _extra_dummy_tags(self, arch, private, source_for_name, x86_source_for_name, target):
        """
        For non-x86 arches, not all images are built (e.g. kuryr), but they may
        be mentioned in CVO image references. Thus, make sure there is a tag for
        every tag we find in x86_64 and provide a dummy image to stand in if needed.

        :return: a list of tag specs for the payload images not built for this arch.
        """
        tag_list = []
        if 'cli' in source_for_name:  # `cli` serves as the dummy image for the replacement
            extra_tags = x86_source_for_name.keys() - source_for_name.keys()
            for tag_name in extra_tags:
                yellow_print('Unable to find tag {} for arch {} ; substituting cli image'.format(tag_name, arch))
                tag_list.append({
                    'name': tag_name,
                    'from': {
                        'kind': 'DockerImage',
                        'name': self._build_dest_name(source_for_name['cli'], target.orgrepo)
                    }
                })
        elif self.runtime.group_config.public_upstreams and not private:
            # If cli is embargoed, it is expected that cli is missing in any non *-priv imagestreams.
            self.runtime.logger.warning(f"Unable to find cli tag from {arch} imagestream. Is `cli` image embargoed?")
        else:
            # If CVE embargo support is disabled or the "cli" image is also
            # missing in *-priv namespaces, an error will be raised.
            raise DoozerFatalError('A dummy image is required for arch {}, but unable to find a cli tag for this arch'.format(arch))

        return tag_list
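
The substitution above hinges on dict-view set arithmetic: `x86_source_for_name.keys() - source_for_name.keys()` yields exactly the tags missing on this arch. A minimal, standalone sketch of the same pattern (the tag maps below are hypothetical stand-ins for x86_source_for_name / source_for_name):

# Hypothetical tag->pullspec maps; dict.keys() views support set operations.
x86_tags = {'cli': 'registry/ose-cli@sha256:aaa', 'kuryr': 'registry/ose-kuryr@sha256:bbb'}
arm_tags = {'cli': 'registry/ose-cli-arm64@sha256:ccc'}

extra_tags = x86_tags.keys() - arm_tags.keys()  # {'kuryr'}
tag_list = [
    {'name': tag, 'from': {'kind': 'DockerImage', 'name': arm_tags['cli']}}
    for tag in sorted(extra_tags)
]
print(tag_list)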
Example #2
    def _get_mirror_sources(self, latest_builds, mismatched_siblings):
        """
        Determine the image sources to mirror to each arch-private-specific imagestream,
        excluding mismatched siblings; also record success/failure in the state.

        :return: map[(arch, private)] -> map[image_name] -> { version, release, image_src, digest, build_record, archive }
        """
        mirroring = {}
        for record in latest_builds:
            image = record.image
            error = None
            if image.distgit_key in mismatched_siblings:
                error = "Siblings built from different commits"
            else:
                for archive in record.archives:
                    arch = archive["arch"]
                    pullspecs = archive["extra"]["docker"]["repositories"]
                    if not pullspecs or ":" not in pullspecs[-1]:  # in case of no pullspecs or invalid format
                        error = f"Unable to find pullspecs for: {image.image_name_short}"
                        continue  # the failure is printed and recorded once per build, below
                    # The tag that will be used in the imagestreams
                    tag_name = image.image_name_short
                    tag_name = tag_name[4:] if tag_name.startswith("ose-") else tag_name  # it _should_ but... to be safe
                    digest = archive["extra"]['docker']['digests']['application/vnd.docker.distribution.manifest.v2+json']
                    if not digest.startswith("sha256:"):  # It should start with sha256: for now. Let's raise an error if this changes.
                        raise ValueError(f"Received unrecognized digest {digest} for image {pullspecs[-1]}")

                    mirroring_value = dict(
                        version=record.build["version"],
                        release=record.build["release"],
                        image_src=pullspecs[-1],
                        digest=digest,
                        build_record=record,
                        archive=archive,
                    )

                    if record.private:  # exclude embargoed images from the ocp[-arch] imagestreams
                        yellow_print(f"Omitting embargoed image {pullspecs[-1]}")
                    else:
                        self.runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the public mirroring list with imagestream tag {tag_name}...")
                        mirroring.setdefault((arch, False), {})[tag_name] = mirroring_value

                    if self.runtime.group_config.public_upstreams:
                        # when public_upstreams are configured, both embargoed and non-embargoed images should be included in the ocp[-arch]-priv imagestreams
                        self.runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the private mirroring list with imagestream tag {tag_name}...")
                        mirroring.setdefault((arch, True), {})[tag_name] = mirroring_value

            # per build, record in the state whether we can successfully mirror it
            if error:
                red_print(error)
                state.record_image_fail(self.state, image, error, self.runtime.logger)
            else:
                state.record_image_success(self.state, image)

        return mirroring
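
The returned structure keys each stream by an (arch, private) tuple. A short, self-contained sketch of how a consumer walks it (the map below is hand-built illustration data, not real build output):

# Hand-built example in the shape _get_mirror_sources returns:
# map[(arch, private)] -> map[tag_name] -> entry
mirroring = {
    ('x86_64', False): {'cli': {'version': 'v4.2.0', 'release': '1', 'image_src': 'registry/ose-cli@sha256:abc', 'digest': 'sha256:abc'}},
    ('x86_64', True): {'cli': {'version': 'v4.2.0', 'release': '1', 'image_src': 'registry/ose-cli@sha256:abc', 'digest': 'sha256:abc'}},
}

for (arch, private), tags in mirroring.items():
    stream = f"ocp-{arch}" + ('-priv' if private else '')
    for tag_name, entry in tags.items():
        print(f"{stream}: {tag_name} <- {entry['image_src']}")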
Example #3
def cli(ctx, **kwargs):
    global CTX_GLOBAL
    kwargs['global_opts'] = None  # can only be set in settings.yaml, add manually

    # This section is mostly for containerizing doozer.
    # It allows the user to simply place settings.yaml into their working dir
    # and then mount that working dir into the container.
    # The container automatically sets DOOZER_WORKING_DIR.
    # Having settings.yaml in the user directory would overcomplicate this.
    # Note: this means that a working_dir entry in that config would override everything.
    wd = None
    wd_env = cli_opts.CLI_OPTS['working_dir']['env']
    config_path_override = None

    # regardless of the container using the ENV var, always respect
    # --working-dir above all else
    if kwargs['working_dir']:
        wd = kwargs['working_dir']
    elif wd_env in os.environ:
        wd = os.environ[wd_env]

    # Only if settings.yaml exists in the workspace do we force dotconfig
    # to override the usual flow. Otherwise this falls back to
    # potentially getting working-dir from ~/.config/doozer/settings.yaml.
    if wd and os.path.isfile(os.path.join(wd, 'settings.yaml')):
        config_path_override = wd

    cfg = dotconfig.Config('doozer',
                           'settings',
                           template=cli_opts.CLI_CONFIG_TEMPLATE,
                           envvars=cli_opts.CLI_ENV_VARS,
                           cli_args=kwargs,
                           path_override=config_path_override)

    if cli_opts.config_is_empty(cfg.full_path):
        msg = ("It appears you may be using Doozer for the first time.\n"
               "Be sure to setup Doozer using the user config file:\n"
               "{}\n").format(cfg.full_path)
        yellow_print(msg)

    # set global option defaults
    runtime_args = cfg.to_dict()
    global_opts = runtime_args['global_opts']
    if global_opts is None:
        global_opts = {}
    for k, v in cli_opts.GLOBAL_OPT_DEFAULTS.items():
        if k not in global_opts or global_opts[k] is None:
            global_opts[k] = v
    runtime_args['global_opts'] = global_opts

    ctx.obj = Runtime(cfg_obj=cfg,
                      command=ctx.invoked_subcommand,
                      **runtime_args)
    CTX_GLOBAL = ctx
    return ctx
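
The defaulting loop near the end is a plain fill-in merge: keys absent or None in global_opts are taken from the defaults. A standalone sketch of the same merge (GLOBAL_OPT_DEFAULTS here is a hypothetical stand-in for cli_opts.GLOBAL_OPT_DEFAULTS):

# Hypothetical stand-in for cli_opts.GLOBAL_OPT_DEFAULTS.
GLOBAL_OPT_DEFAULTS = {'distgit_threads': 20, 'rhpkg_clone_timeout': 900}

def apply_global_opt_defaults(runtime_args):
    """Fill unset or None global_opts keys from the defaults, in place."""
    global_opts = runtime_args.get('global_opts') or {}
    for k, v in GLOBAL_OPT_DEFAULTS.items():
        if global_opts.get(k) is None:
            global_opts[k] = v
    runtime_args['global_opts'] = global_opts
    return runtime_args

print(apply_global_opt_defaults({'global_opts': {'distgit_threads': 5}}))
# -> {'global_opts': {'distgit_threads': 5, 'rhpkg_clone_timeout': 900}}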
Example #4
def _latest_mosc_istag(runtime, arch, private):
    try:
        version = runtime.get_minor_version()
        _, pullspec = rhcos.latest_machine_os_content(version, arch, private)
        if not pullspec:
            yellow_print(f"No RHCOS found for {version} arch={arch} private={private}")
            return None
    except Exception as ex:
        yellow_print(f"error finding RHCOS: {ex}")
        return None

    return {
        'name': "machine-os-content",
        'from': {
            'kind': 'DockerImage',
            'name': pullspec
        }
    }
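
The istag dict returned above drops straight into an ImageStream's spec.tags list. A minimal sketch of that assembly (assumes PyYAML; the RHCOS pullspec is made up):

import yaml  # assumes PyYAML is available

# Hypothetical istag in the shape _latest_mosc_istag returns.
mosc_istag = {
    'name': 'machine-os-content',
    'from': {'kind': 'DockerImage', 'name': 'quay.io/example/rhcos@sha256:abc123'},
}

imagestream = {
    'kind': 'ImageStream',
    'apiVersion': 'image.openshift.io/v1',
    'metadata': {'name': '4.2-art-latest', 'namespace': 'ocp'},
    'spec': {'tags': [mosc_istag]},
}
print(yaml.safe_dump(imagestream, default_flow_style=False))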
Example #5
def release_gen_payload(runtime, is_name, is_namespace, organization, repository, event_id):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files contains SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files contains YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes, this command generates mirroring YAML files
after the arch-specific files have been generated. The YAML files
include the names of the generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.

## Validation ##

Additionally we want to check that the following conditions are true for each
imagestream being updated:

* For all architectures built, RHCOS builds must have matching versions of any
  unshipped RPM they include (per-entry os metadata - the set of RPMs may differ
  between arches, but versions should not).
* Any RPMs present in images (including machine-os-content) from unshipped RPM
  builds included in one of our candidate tags must exactly version-match the
  latest RPM builds in those candidate tags (ONLY; we never flag what we don't
  directly ship.)

These checks (and likely more in the future) should run and any failures should
be listed in brief via a "release.openshift.io/inconsistency" annotation on the
relevant image istag (these are publicly visible; ref. https://bit.ly/37cseC1)
and in more detail in state.yaml. The release-controller, per ART-2195, will
read and propagate/expose this annotation in its display of the release image.
    """
    runtime.initialize(clone_distgits=False)
    brew_session = runtime.build_retrying_koji_client()
    base_target = SyncTarget(  # where we will mirror and record the tags
        orgrepo=f"{organization}/{repository}",
        istream_name=is_name if is_name else default_is_base_name(runtime.get_minor_version()),
        istream_namespace=is_namespace if is_namespace else default_is_base_namespace()
    )

    gen = PayloadGenerator(runtime, brew_session, event_id, base_target)
    latest_builds, invalid_name_items, images_missing_builds, mismatched_siblings, non_release_items = gen.load_latest_builds()
    gen.write_mirror_destinations(latest_builds, mismatched_siblings)

    if non_release_items:
        yellow_print("Images skipped due to non_release tag:")
        for img in sorted(non_release_items):
            click.echo("   {}".format(img))

    if invalid_name_items:
        yellow_print("Images skipped due to invalid naming:")
        for img in sorted(invalid_name_items):
            click.echo("   {}".format(img))

    if images_missing_builds:
        yellow_print("No builds found for:")
        for img in sorted(images_missing_builds):
            click.echo("   {}".format(img))

    if mismatched_siblings:
        yellow_print("Images skipped due to siblings mismatch:")
        for img in sorted(mismatched_siblings):
            click.echo("   {}".format(img))
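
The SRC=DEST files mentioned in the docstring are plain input for 'oc image mirror'. A sketch of emitting one, following the digest-as-tag naming used elsewhere in this code (the pullspecs and the quay org/repo are illustrative only):

# Illustrative tag -> (source pullspec, manifest digest) data.
entries = {
    'cli': ('registry-proxy.example.com/rh-osbs/openshift-ose-cli:v4.2.0-1', 'sha256:abc'),
    'hyperkube': ('registry-proxy.example.com/rh-osbs/openshift-ose-hyperkube:v4.2.0-1', 'sha256:def'),
}

with open('src_dest.x86_64', 'w', encoding='utf-8') as out_file:
    for tag_name, (src, digest) in sorted(entries.items()):
        dest = f"quay.io/openshift-release-dev/ocp-v4.0-art-dev:{digest.replace(':', '-')}"
        out_file.write(f"{src}={dest}\n")  # one SRC=DEST line per image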
Example #6
def images_streams_prs(runtime, github_access_token, bug, interstitial,
                       ignore_ci_master, draft_prs, moist_run, add_labels):
    runtime.initialize(clone_distgits=False, clone_source=False)
    g = Github(login_or_token=github_access_token)
    github_user = g.get_user()

    major = runtime.group_config.vars['MAJOR']
    minor = runtime.group_config.vars['MINOR']
    interstitial = int(interstitial)

    master_major, master_minor = extract_version_fields(what_is_in_master(),
                                                        at_least=2)
    if not ignore_ci_master and (major > master_major or minor > master_minor):
        # ART is building a release before it is in master. Too early to open PRs.
        runtime.logger.warning(
            f'Target {major}.{minor} has not been in master yet (it is tracking {master_major}.{master_minor}); skipping PRs'
        )
        exit(0)

    prs_in_master = (major == master_major
                     and minor == master_minor) and not ignore_ci_master

    pr_links = {}  # map of distgit_key to PR URLs associated with updates
    new_pr_links = {}
    skipping_dgks = set()  # If a distgit key is skipped, its children will see it in this list and skip themselves.
    for image_meta in runtime.ordered_image_metas():
        dgk = image_meta.distgit_key
        logger = image_meta.logger
        logger.info('Analyzing image')

        alignment_prs_config = image_meta.config.content.source.ci_alignment.streams_prs

        if alignment_prs_config and alignment_prs_config.enabled is not Missing and not alignment_prs_config.enabled:
            # Make sure this is an explicit False. Missing means the default or True.
            logger.info('The image has alignment PRs disabled; ignoring')
            continue

        from_config = image_meta.config['from']
        if not from_config:
            logger.info('Skipping PRs since there is no configured .from')
            continue

        desired_parents = []
        builders = from_config.builder or []
        for builder in builders:
            upstream_image = resolve_upstream_from(runtime, builder)
            if not upstream_image:
                logger.warning(
                    f'Unable to resolve upstream image for: {builder}')
                break
            desired_parents.append(upstream_image)

        parent_upstream_image = resolve_upstream_from(runtime, from_config)
        if len(desired_parents) != len(builders) or not parent_upstream_image:
            logger.warning(
                'Unable to find all ART equivalent upstream images for this image'
            )
            continue

        desired_parents.append(parent_upstream_image)
        desired_parent_digest = calc_parent_digest(desired_parents)
        logger.info(
            f'Found desired FROM state of: {desired_parents} with digest: {desired_parent_digest}'
        )

        source_repo_url, source_repo_branch = _get_upstream_source(
            runtime, image_meta)

        if not source_repo_url:
            # No upstream to clone; no PRs to open
            continue

        public_repo_url, public_branch = runtime.get_public_upstream(
            source_repo_url)
        if not public_branch:
            public_branch = source_repo_branch

        # There are two standard upstream branching styles:
        # release-4.x   : CI fast-forwards from master when appropriate
        # openshift-4.x : Upstream team manages completely.
        # For the former style, we may need to open the PRs against master.
        # For the latter style, always open directly against named branch
        if public_branch.startswith('release-') and prs_in_master:
            # TODO: auto-detect default branch for repo instead of assuming master
            public_branch = 'master'

        _, org, repo_name = split_git_url(public_repo_url)

        public_source_repo = g.get_repo(f'{org}/{repo_name}')

        try:
            fork_repo_name = f'{github_user.login}/{repo_name}'
            fork_repo = g.get_repo(fork_repo_name)
        except UnknownObjectException:
            # Repo doesn't exist; fork it
            fork_repo = github_user.create_fork(public_source_repo)

        fork_branch_name = f'art-consistency-{runtime.group_config.name}-{dgk}'
        fork_branch_head = f'{github_user.login}:{fork_branch_name}'

        fork_branch = None
        try:
            fork_branch = fork_repo.get_branch(fork_branch_name)
        except UnknownObjectException:
            # Doesn't presently exist and will need to be created
            pass
        except GithubException as ge:
            # This API seems to return 404 instead of UnknownObjectException.
            # So allow 404 to pass through as well.
            if ge.status != 404:
                raise

        public_repo_url = convert_remote_git_to_ssh(public_repo_url)
        clone_dir = os.path.join(runtime.working_dir, 'clones', dgk)
        # Clone the private url to make the best possible use of our doozer_cache
        runtime.git_clone(source_repo_url, clone_dir)

        with Dir(clone_dir):
            exectools.cmd_assert(f'git remote add public {public_repo_url}')
            exectools.cmd_assert(
                f'git remote add fork {convert_remote_git_to_ssh(fork_repo.git_url)}'
            )
            exectools.cmd_assert('git fetch --all')

            # The path to the Dockerfile in the target branch
            if image_meta.config.content.source.dockerfile is not Missing:
                # Be aware that this attribute sometimes contains path elements too.
                dockerfile_name = image_meta.config.content.source.dockerfile
            else:
                dockerfile_name = "Dockerfile"

            df_path = Dir.getpath()
            if image_meta.config.content.source.path:
                dockerfile_name = os.path.join(
                    image_meta.config.content.source.path, dockerfile_name)

            df_path = df_path.joinpath(dockerfile_name)

            fork_branch_parent_digest = None
            fork_branch_parents = None
            if fork_branch:
                # If there is already an art reconciliation branch, get an MD5
                # of the FROM images in the Dockerfile in that branch.
                exectools.cmd_assert(f'git checkout fork/{fork_branch_name}')
                fork_branch_parent_digest, fork_branch_parents = extract_parent_digest(
                    df_path)

            # Now change over to the target branch in the actual public repo
            exectools.cmd_assert(f'git checkout public/{public_branch}')

            source_branch_parent_digest, source_branch_parents = extract_parent_digest(
                df_path)

            if desired_parent_digest == source_branch_parent_digest:
                green_print(
                    'Desired digest and source digest match; Upstream is in a good state'
                )
                continue

            yellow_print(
                f'Upstream dockerfile does not match desired state in {public_repo_url}/blob/{public_branch}/{dockerfile_name}'
            )
            print(
                f'Desired parents: {desired_parents} ({desired_parent_digest})'
            )
            print(
                f'Source parents: {source_branch_parents} ({source_branch_parent_digest})'
            )
            print(
                f'Fork branch digest: {fork_branch_parents} ({fork_branch_parent_digest})'
            )

            first_commit_line = f"Updating {image_meta.name} builder & base images to be consistent with ART"
            reconcile_info = f"Reconciling with {convert_remote_git_to_https(runtime.gitdata.origin_url)}/tree/{runtime.gitdata.commit_hash}/images/{os.path.basename(image_meta.config_filename)}"

            diff_text = None
            if fork_branch_parent_digest != desired_parent_digest:
                # The fork branch either does not exist, or does not have the desired parent image state
                # Let's create a local branch that will contain the Dockerfile in the state we desire.
                work_branch_name = '__mod'
                exectools.cmd_assert(f'git checkout public/{public_branch}')
                exectools.cmd_assert(f'git checkout -b {work_branch_name}')
                with df_path.open(mode='r+') as handle:
                    dfp = DockerfileParser(cache_content=True,
                                           fileobj=io.BytesIO())
                    dfp.content = handle.read()
                    dfp.parent_images = desired_parents
                    handle.truncate(0)
                    handle.seek(0)
                    handle.write(dfp.content)

                diff_text, _ = exectools.cmd_assert(f'git diff {str(df_path)}')

                if not moist_run:
                    exectools.cmd_assert(f'git add {str(df_path)}')
                    commit_prefix = ''
                    if repo_name.startswith('kubernetes'):
                        # A couple of repos have this requirement: openshift/kubernetes & openshift/kubernetes-autoscaler.
                        # This check may suffice for now, but it may eventually need to be in doozer metadata.
                        commit_prefix = 'UPSTREAM: <carry>: '
                    commit_msg = f"""{commit_prefix}{first_commit_line}
{reconcile_info}
"""
                    exectools.cmd_assert(
                        f'git commit -m "{commit_msg}"'
                    )  # Add a commit atop the public branch's current state
                    # Create or update the remote fork branch
                    exectools.cmd_assert(
                        f'git push --force fork {work_branch_name}:{fork_branch_name}'
                    )

            # At this point, we have a fork branch in the proper state
            pr_body = f"""{first_commit_line}
{reconcile_info}

If you have any questions about this pull request, please reach out to `@art-team` in the `#aos-art` coreos slack channel.
"""

            parent_pr_url = None
            parent_meta = image_meta.resolve_parent()
            if parent_meta:
                if parent_meta.distgit_key in skipping_dgks:
                    skipping_dgks.add(image_meta.distgit_key)
                    yellow_print(
                        f'Image has parent {parent_meta.distgit_key} which was skipped; skipping self: {image_meta.distgit_key}'
                    )
                    continue

                parent_pr_url = pr_links.get(parent_meta.distgit_key, None)
                if parent_pr_url:
                    if parent_meta.config.content.source.ci_alignment.streams_prs.merge_first:
                        skipping_dgks.add(image_meta.distgit_key)
                        yellow_print(
                            f'Image has parent {parent_meta.distgit_key} open PR ({parent_pr_url}) and streams_prs.merge_first==True; skipping PR opening for this image {image_meta.distgit_key}'
                        )
                        continue

                    # If the parent has an open PR associated with it, make sure the
                    # child PR notes that the parent PR should merge first.
                    pr_body += f'\nDepends on {parent_pr_url} . Allow it to merge and then run `/test all` on this PR.'

            # Let's see if there is a PR opened
            open_prs = list(
                public_source_repo.get_pulls(state='open',
                                             head=fork_branch_head))
            if open_prs:
                existing_pr = open_prs[0]
                # Update the body, but never the title; the upstream team may need to set
                # something like a "Bug XXXX:" prefix there. Don't muck with it.

                if alignment_prs_config.auto_label and add_labels:
                    # If we are to automatically add labels to this upstream PR, do so.
                    existing_pr.set_labels(*alignment_prs_config.auto_label)

                existing_pr.edit(body=pr_body)
                pr_url = existing_pr.html_url
                pr_links[dgk] = pr_url
                yellow_print(
                    f'A PR is already open requesting desired reconciliation with ART: {pr_url}'
                )
                continue

            # Otherwise, we need to create a pull request
            if moist_run:
                pr_links[dgk] = f'MOIST-RUN-PR:{dgk}'
                green_print(
                    f'Would have opened PR against: {public_source_repo.html_url}/blob/{public_branch}/{dockerfile_name}.'
                )
                if parent_pr_url:
                    green_print(
                        f'Would have identified dependency on PR: {parent_pr_url}.'
                    )
                if diff_text:
                    yellow_print(diff_text)
                else:
                    yellow_print(
                        f'Fork from which PR would be created ({fork_branch_head}) is populated with desired state.'
                    )
            else:
                pr_title = first_commit_line
                if bug:
                    pr_title = f'Bug {bug}: {pr_title}'
                new_pr = public_source_repo.create_pull(title=pr_title,
                                                        body=pr_body,
                                                        base=public_branch,
                                                        head=fork_branch_head,
                                                        draft=draft_prs)
                if alignment_prs_config.auto_label and add_labels:
                    # If we are to automatically add labels to this upstream PR, do so.
                    new_pr.set_labels(*alignment_prs_config.auto_label)
                pr_msg = f'A new PR has been opened: {new_pr.html_url}'
                pr_links[dgk] = new_pr.html_url
                new_pr_links[dgk] = new_pr.html_url
                logger.info(pr_msg)
                yellow_print(pr_msg)
                print(
                    f'Sleeping {interstitial} seconds before opening another PR to prevent flooding prow...'
                )
                time.sleep(interstitial)

    if new_pr_links:
        print('Newly opened PRs:')
        print(yaml.safe_dump(new_pr_links))

    if pr_links:
        print('Currently open PRs:')
        print(yaml.safe_dump(pr_links))
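
calc_parent_digest and extract_parent_digest are only referenced above; the comments describe the digest as "an MD5 of the FROM images". A hypothetical sketch of that idea using hashlib (the function body is an assumption, not the real doozer helper):

import hashlib

def calc_parent_digest(parent_images):
    # Digest the ordered FROM images so two Dockerfile states can be
    # compared without diffing whole files.
    md5 = hashlib.md5()
    for parent in parent_images:
        md5.update(parent.encode('utf-8'))
        md5.update(b'\n')
    return md5.hexdigest()

desired = [
    'registry.ci.openshift.org/ocp/builder:golang-1.15',
    'registry.ci.openshift.org/ocp/4.7:base',
]
print(calc_parent_digest(desired))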
Example #7
def release_gen_payload(runtime, is_name, is_namespace, organization, repository, event_id):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files contains SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files contains YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes, this command generates mirroring YAML files
after the arch-specific files have been generated. The YAML files
include the names of the generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.
    """
    runtime.initialize(clone_distgits=False, config_excludes='non_release')
    orgrepo = "{}/{}".format(organization, repository)
    cmd = runtime.command
    runtime.state[cmd] = dict(state.TEMPLATE_IMAGE)
    lstate = runtime.state[cmd]  # get local convenience copy

    if not is_name:
        is_name = default_is_base_name(runtime.get_minor_version())
    if not is_namespace:
        is_namespace = default_is_base_namespace()

    images = list(runtime.image_metas())
    lstate['total'] = len(images)

    no_build_items = []
    invalid_name_items = []

    payload_images = []
    for image in images:
        # Per clayton:
        #   Tim Bielawa: note to self: is only for `ose-` prefixed images
        #   Clayton Coleman: Yes, Get with the naming system or get out of town
        if image.is_payload:
            if not image.image_name_short.startswith("ose-"):
                invalid_name_items.append(image.image_name_short)
                red_print("NOT adding to IS (does not meet name/version conventions): {}".format(image.image_name_short))
                continue
            else:
                payload_images.append(image)

    runtime.logger.info("Fetching latest image builds from Brew...")
    tag_component_tuples = [(image.candidate_brew_tag(), image.get_component_name()) for image in payload_images]
    brew_session = runtime.build_retrying_koji_client()
    latest_builds = brew.get_latest_builds(tag_component_tuples, "image", event_id, brew_session)
    latest_builds = [builds[0] if builds else None for builds in latest_builds]  # flatten the data structure

    runtime.logger.info("Fetching image archives...")
    build_ids = [b["id"] if b else 0 for b in latest_builds]
    archives_list = brew.list_archives_by_builds(build_ids, "image", brew_session)

    mismatched_siblings = find_mismatched_siblings(payload_images, latest_builds, archives_list, runtime.logger, lstate)

    embargoed_build_ids = set()  # a set of private image build ids
    if runtime.group_config.public_upstreams:
        # looking for embargoed image builds
        detector = embargo_detector.EmbargoDetector(brew_session, runtime.logger)
        for index, archive_list in enumerate(archives_list):
            if build_ids[index]:
                detector.archive_lists[build_ids[index]] = archive_list  # store to EmbargoDetector cache to limit Brew queries
        suspects = [b for b in latest_builds if b]
        embargoed_build_ids = detector.find_embargoed_builds(suspects)

    runtime.logger.info("Creating mirroring lists...")

    # These will map[arch] -> map[image_name] -> { version: version, release: release, image_src: image_src }
    mirroring = {}
    for i, image in enumerate(payload_images):
        latest_build = latest_builds[i]
        archives = archives_list[i]
        error = None
        if image.distgit_key in mismatched_siblings:
            error = "Siblings built from different commits"
        elif not (latest_build and archives):  # build or archive doesn't exist
            error = f"Unable to find build for: {image.image_name_short}"
            no_build_items.append(image.image_name_short)
        else:
            for archive in archives:
                arch = archive["arch"]
                pullspecs = archive["extra"]["docker"]["repositories"]
                if not pullspecs or ":" not in pullspecs[-1]:  # in case of no pullspecs or invalid format
                    error = f"Unable to find pullspecs for: {image.image_name_short}"
                    red_print(error, file=sys.stderr)
                    state.record_image_fail(lstate, image, error, runtime.logger)
                    break
                # The tag that will be used in the imagestreams
                tag_name = image.image_name_short
                tag_name = tag_name[4:] if tag_name.startswith("ose-") else tag_name  # it _should_ but... to be safe
                digest = archive["extra"]['docker']['digests']['application/vnd.docker.distribution.manifest.v2+json']
                if not digest.startswith("sha256:"):  # It should start with sha256: for now. Let's raise an error if this changes.
                    raise ValueError(f"Received unrecognized digest {digest} for image {pullspecs[-1]}")
                mirroring_value = {'version': latest_build["version"], 'release': latest_build["release"], 'image_src': pullspecs[-1], 'digest': digest}
                embargoed = latest_build["id"] in embargoed_build_ids  # when public_upstreams are not configured, this is always false
                if not embargoed:  # exclude embargoed images from the ocp[-arch] imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the public mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(arch, {})[tag_name] = mirroring_value
                else:
                    red_print(f"Found embargoed image {pullspecs[-1]}")
                if runtime.group_config.public_upstreams:
                    # when public_upstreams are configured, both embargoed and non-embargoed images should be included in the ocp[-arch]-priv imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the private mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(f"{arch}-priv", {})[tag_name] = mirroring_value
        if not error:
            state.record_image_success(lstate, image)
        else:
            red_print(error, file=sys.stderr)
            state.record_image_fail(lstate, image, error, runtime.logger)

    for key in mirroring:
        private = key.endswith("-priv")
        arch = key[:-5] if private else key  # strip `-priv` suffix

        mirror_filename = 'src_dest.{}'.format(key)
        imagestream_filename = 'image_stream.{}'.format(key)
        target_is_name, target_is_namespace = is_name_and_space(is_name, is_namespace, arch, private)

        def build_dest_name(tag_name):
            entry = mirroring[key][tag_name]
            tag = entry["digest"].replace(":", "-")  # sha256:abcdef -> sha256-abcdef
            return f"quay.io/{orgrepo}:{tag}"

        # Save the default SRC=DEST 'oc image mirror' input to a file for
        # later.
        with io.open(mirror_filename, 'w+', encoding="utf-8") as out_file:
            for tag_name in mirroring[key]:
                dest = build_dest_name(tag_name)
                out_file.write("{}={}\n".format(mirroring[key][tag_name]['image_src'], dest))

        with io.open("{}.yaml".format(imagestream_filename), 'w+', encoding="utf-8") as out_file:
            # Add a tag spec to the image stream. The name of each tag
            # spec does not include the 'ose-' prefix. This keeps them
            # consistent between OKD and OCP

            # Template Base Image Stream object.
            tag_list = []
            isb = {
                'kind': 'ImageStream',
                'apiVersion': 'image.openshift.io/v1',
                'metadata': {
                    'name': target_is_name,
                    'namespace': target_is_namespace,
                },
                'spec': {
                    'tags': tag_list,
                }
            }

            for tag_name in mirroring[key]:
                tag_list.append({
                    'name': tag_name,
                    'from': {
                        'kind': 'DockerImage',
                        'name': build_dest_name(tag_name)
                    }
                })

            # mirroring rhcos
            runtime.logger.info(f"Getting latest RHCOS pullspec for {target_is_name}...")
            mosc_istag = _latest_mosc_istag(runtime, arch, private)
            if mosc_istag:
                tag_list.append(mosc_istag)

            # Not all images are built for non-x86 arches (e.g. kuryr), but they
            # may be mentioned in image references. Thus, make sure there is a tag
            # for every tag we find in x86_64 and provide just a dummy image.
            if 'cli' not in mirroring[key]:  # `cli` serves as the dummy image for the replacement
                if runtime.group_config.public_upstreams and not private:  # If cli is embargoed, it is expected that cli is missing in any non *-priv imagestreams.
                    runtime.logger.warning(f"Unable to find cli tag from {key} imagestream. Is `cli` image embargoed?")
                else:  # if CVE embargo support is disabled or the "cli" image is also missing in *-priv namespaces, an error will be raised
                    raise DoozerFatalError('A dummy image is required for arch {}, but unable to find a cli tag for this arch'.format(arch))
            else:
                extra_tags = mirroring['x86_64-priv' if private else 'x86_64'].keys() - mirroring[key].keys()
                for tag_name in extra_tags:
                    yellow_print('Unable to find tag {} for arch {} ; substituting cli image'.format(tag_name, arch))
                    tag_list.append({
                        'name': tag_name,
                        'from': {
                            'kind': 'DockerImage',
                            'name': build_dest_name('cli')  # cli is always built and is harmless
                        }
                    })

            yaml.safe_dump(isb, out_file, indent=2, default_flow_style=False)

    if no_build_items:
        yellow_print("No builds found for:")
        for img in sorted(no_build_items):
            click.echo("   {}".format(img))

    if invalid_name_items:
        yellow_print("Images skipped due to invalid naming:")
        for img in sorted(invalid_name_items):
            click.echo("   {}".format(img))

    if mismatched_siblings:
        yellow_print("Images skipped due to siblings mismatch:")
        for img in sorted(mismatched_siblings):
            click.echo("   {}".format(img))
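
The one fixed naming rule in this command is the digest-to-tag conversion inside build_dest_name: the destination tag is the manifest digest with ':' swapped for '-'. A standalone sketch (the quay org/repo is illustrative):

def build_dest_name(digest, orgrepo='openshift-release-dev/ocp-v4.0-art-dev'):
    # sha256:abcdef... -> quay.io/<org>/<repo>:sha256-abcdef...
    if not digest.startswith('sha256:'):
        raise ValueError(f"Received unrecognized digest {digest}")
    return f"quay.io/{orgrepo}:{digest.replace(':', '-')}"

print(build_dest_name('sha256:abc123'))
# quay.io/openshift-release-dev/ocp-v4.0-art-dev:sha256-abc123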