Code Example #1
    def resolve_metadata(self):
        """
        The group control data can be on a local filesystem, in a git
        repository that can be checked out, or some day in a database

        If the scheme is empty, assume file:///...
        Allow http, https, ssh and ssh+git (all valid git clone URLs)
        """

        if self.data_path is None:
            raise DoozerFatalError(
                ("No metadata path provided. Must be set via one of:\n"
                 "* data_path key in {}\n"
                 "* doozer --data-path [PATH|URL]\n"
                 "* Environment variable DOOZER_DATA_PATH\n").format(
                     self.cfg_obj.full_path))

        try:
            self.gitdata = gitdata.GitData(data_path=self.data_path,
                                           clone_dir=self.working_dir,
                                           branch=self.group,
                                           logger=self.logger)
            self.data_dir = self.gitdata.data_dir
        except gitdata.GitDataException as ex:
            raise DoozerFatalError(ex.message)
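
Every example on this page raises DoozerFatalError. Its definition is not shown here; as a rough sketch (an assumption, not the project's actual code), it behaves like a plain Exception subclass that the CLI layer catches to abort with a message:

# Minimal sketch (assumption): DoozerFatalError acts like a plain Exception subclass.
class DoozerFatalError(Exception):
    """Unrecoverable error; callers are expected to stop processing."""


def resolve_metadata(data_path):
    # Mirrors the guard in example #1: fail fast when no metadata path is given.
    if data_path is None:
        raise DoozerFatalError("No metadata path provided.")
    return data_path


try:
    resolve_metadata(None)
except DoozerFatalError as e:
    print(f"doozer: fatal: {e}")
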
Code Example #2
    def assert_golang_versions(self):
        """ Assert all buildroots have consistent versions of golang compilers
        """
        check_mode = self.runtime.group_config.check_golang_versions or "x.y"  # no: do not check; x.y: only major and minor version; exact: the z-version must be the same
        if check_mode == "no":
            return

        # populate target_golangs with information from Brew
        with RPMMetadata.target_golangs_lock:
            uncached_targets = set(self.targets) - RPMMetadata.target_golangs.keys()
        if uncached_targets:
            uncached_targets = list(uncached_targets)
            self.logger.debug(f"Querying golang compiler versions for targets {uncached_targets}...")
            brew_session = self.runtime.build_retrying_koji_client()
            # get buildroots for uncached targets
            with brew_session.multicall(strict=True) as m:
                tasks = [m.getBuildTarget(target) for target in uncached_targets]
            buildroots = [task.result["build_tag_name"] for task in tasks]
            # get latest build of golang compiler for each buildroot
            golang_components = ["golang", "golang-scl-shim"]
            for target, buildroot in zip(uncached_targets, buildroots):
                latest_builds = brew.get_latest_builds([(buildroot, component) for component in golang_components], "rpm", None, brew_session)
                latest_builds = [builds[0] for builds in latest_builds if builds]  # flatten latest_builds
                # It is possible that a buildroot has multiple golang compiler packages (golang and golang-scl-shim) tagged in.
                # We need to find the maximum version in each buildroot.
                max_golang_nevr = None
                for build in latest_builds:
                    nevr = (build["name"], build["epoch"], build["version"], build["release"])
                    if max_golang_nevr is None or rpm.labelCompare(nevr[1:], max_golang_nevr[1:]) > 0:
                        max_golang_nevr = nevr
                if max_golang_nevr is None:
                    raise DoozerFatalError(f"Buildroot {buildroot} doesn't contain any golang compiler packages.")
                if max_golang_nevr[0] == "golang-scl-shim":
                    # golang-scl-shim is not an actual compiler but an adaptor to make go-toolset look like golang for an RPM build.
                    # We need to check the actual go-toolset build it requires.
                    # See https://source.redhat.com/groups/public/atomicopenshift/atomicopenshift_wiki/what_art_needs_to_know_about_golang#jive_content_id_golangsclshim
                    major, minor = max_golang_nevr[2].split(".")[:2]
                    go_toolset_builds = brew_session.getLatestBuilds(buildroot, package=f"go-toolset-{major}.{minor}", type="rpm")
                    if not go_toolset_builds:
                        raise DoozerFatalError(f"Buildroot {buildroot} doesn't have go-toolset-{major}.{minor} tagged in.")
                    max_golang_nevr = (go_toolset_builds[0]["name"], go_toolset_builds[0]["epoch"], go_toolset_builds[0]["version"], go_toolset_builds[0]["release"])
                with RPMMetadata.target_golangs_lock:
                    RPMMetadata.target_golangs[target] = max_golang_nevr

        # assert all buildroots have the same version of golang compilers
        it = iter(self.targets)
        first_target = next(it)
        with RPMMetadata.target_golangs_lock:
            first_nevr = RPMMetadata.target_golangs[first_target]
            for target in it:
                nevr = RPMMetadata.target_golangs[target]
                if (check_mode == "exact" and nevr[2] != first_nevr[2]) or (check_mode == "x.y" and nevr[2].split(".")[:2] != first_nevr[2].split(".")[:2]):
                    raise DoozerFatalError(f"Buildroot for target {target} has inconsistent golang compiler version {nevr[2]} while target {first_target} has {first_nevr[2]}.")
Code Example #3
async def _rpms_build(runtime: Runtime, scratch: bool, dry_run: bool):
    runtime.initialize(mode='rpms', clone_source=False,
                       clone_distgits=False)  # We will clone distgits later.
    if runtime.local:
        raise DoozerFatalError("Local RPM build is not currently supported.")

    runtime.assert_mutation_is_permitted()

    rpms: List[RPMMetadata] = runtime.rpm_metas()
    if not rpms:
        runtime.logger.error("No RPMs found. Check the arguments.")
        exit(0)

    with runtime.shared_koji_client_session() as koji_api:
        if not koji_api.logged_in:
            koji_api.gssapi_login()

    builder = RPMBuilder(runtime, dry_run=dry_run, scratch=scratch)
    tasks = [
        asyncio.ensure_future(_build_rpm(runtime, builder, rpm))
        for rpm in rpms
    ]

    results = await asyncio.gather(*tasks, return_exceptions=True)
    failed = [rpms[i].distgit_key for i, r in enumerate(results) if r != 0]
    if failed:
        runtime.logger.error("\n".join(["Build failures:"] + sorted(failed)))
        return 1
    return 0
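
The gather/return_exceptions pattern above treats both non-zero return codes and raised exceptions as failures. A self-contained sketch of the same pattern, which also appears in example #9 below (names here are illustrative, not doozer APIs):

import asyncio


async def fake_build(name: str) -> int:
    if name == "bad-rpm":
        raise RuntimeError("simulated build error")
    return 0


async def main():
    names = ["good-rpm", "bad-rpm"]
    results = await asyncio.gather(*(fake_build(n) for n in names),
                                   return_exceptions=True)
    # Exceptions are returned in place, so `r != 0` also catches them.
    failed = [names[i] for i, r in enumerate(results) if r != 0]
    print("failures:", failed)


asyncio.run(main())
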
Code Example #4
    def _extra_dummy_tags(self, arch, private, source_for_name, x86_source_for_name, target):
        """
        For non-x86 arches, not all images are built (e.g. kuryr), but they may
        be mentioned in CVO image references. Thus, make sure there is a tag for
        every tag we find in x86_64 and provide a dummy image to stand in if needed.

        :return: a list of tag specs for the payload images not built in this arch.
        """
        tag_list = []
        if 'cli' in source_for_name:  # `cli` serves as the dummy image for the replacement
            extra_tags = x86_source_for_name.keys() - source_for_name.keys()
            for tag_name in extra_tags:
                yellow_print('Unable to find tag {} for arch {} ; substituting cli image'.format(tag_name, arch))
                tag_list.append({
                    'name': tag_name,
                    'from': {
                        'kind': 'DockerImage',
                        'name': self._build_dest_name(source_for_name['cli'], target.orgrepo)
                    }
                })
        elif self.runtime.group_config.public_upstreams and not private:
            # If cli is embargoed, it is expected that cli is missing in any non *-priv imagestreams.
            self.runtime.logger.warning(f"Unable to find cli tag from {arch} imagestream. Is `cli` image embargoed?")
        else:
            # if CVE embargo support is disabled or the "cli" image is also
            # missing in *-priv namespaces, an error will be raised.
            raise DoozerFatalError('A dummy image is required on arch {}, but unable to find a cli tag for this arch'.format(arch))

        return tag_list
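
The substitution works off a simple set difference between the x86_64 tags and the tags built for the current arch. A standalone sketch with made-up data (the istag values and the dummy pullspec are placeholders):

x86_source_for_name = {"cli": "istag-cli", "kuryr-cni": "istag-kuryr"}  # what x86_64 built
source_for_name = {"cli": "istag-cli"}                                  # what this arch built

extra_tags = x86_source_for_name.keys() - source_for_name.keys()        # {'kuryr-cni'}
tag_list = [{
    "name": tag_name,
    "from": {"kind": "DockerImage", "name": "quay.io/example/dummy-cli"},  # placeholder pullspec
} for tag_name in extra_tags]
print(tag_list)  # one dummy istag spec per payload tag missing on this arch
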
Code Example #5
File: rpmcfg.py  Project: shiywang/doozer
    def _run_modifications(self):
        """
        Interprets and applies content.source.modify steps in the image metadata.
        """
        with open(self.specfile, 'r') as df:
            specfile_data = df.read()

        self.logger.debug(
            "About to start modifying spec file [{}]:\n{}\n".format(
                self.name, specfile_data))

        for modification in self.config.content.source.modifications:
            if modification.action == "replace":
                match = modification.match
                assert (match is not Missing)
                replacement = modification.replacement
                assert (replacement is not Missing)
                pre = specfile_data
                specfile_data = pre.replace(match, replacement)
                if specfile_data == pre:
                    raise DoozerFatalError(
                        "{}: Replace ({}->{}) modification did not make a change to the spec file content"
                        .format(self.name, match, replacement))
                self.logger.debug(
                    "Performed string replace '%s' -> '%s':\n%s\n" %
                    (match, replacement, specfile_data))
            else:
                raise IOError(
                    "%s: Don't know how to perform modification action: %s" %
                    (self.distgit_key, modification.action))

        with open(self.specfile, 'w') as df:
            df.write(specfile_data)
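
The replace action insists that the modification actually changes the file; a no-op replacement is treated as a configuration error. A minimal sketch of that check (ValueError stands in for DoozerFatalError here):

def apply_replace(text: str, match: str, replacement: str) -> str:
    post = text.replace(match, replacement)
    if post == text:
        # The real code raises DoozerFatalError in this case.
        raise ValueError(f"Replace ({match}->{replacement}) made no change")
    return post


spec = "Version: 0.0.1\nRelease: 1%{?dist}\n"
print(apply_replace(spec, "0.0.1", "4.9.0"))
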
Code Example #6
 def resolve_image(self, distgit_name, required=True):
     if distgit_name not in self.image_map:
         if not required:
             return None
         raise DoozerFatalError(
             "Unable to find image metadata in group / included images: %s"
             % distgit_name)
     return self.image_map[distgit_name]
Code Example #7
 def assert_mutation_is_permitted(self):
     """
     In group.yml, it is possible to instruct doozer to prevent all builds / mutation of distgits.
     Call this method if you are about to mutate anything. If builds are disabled, an exception will
     be thrown.
     """
     if self.freeze_automation == FREEZE_AUTOMATION_YES:
         raise DoozerFatalError(
             'Automation (builds / mutations) for this group is currently frozen (freeze_automation set to {}). Coordinate with the group owner to change this if you believe it is incorrect.'
             .format(FREEZE_AUTOMATION_YES))
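
The freeze flag comes from group.yml. A hypothetical fragment of that configuration, shown as a Python dict for illustration (the real file is YAML and may use other values besides "yes"):

FREEZE_AUTOMATION_YES = "yes"

group_config = {"freeze_automation": "yes"}  # hypothetical group.yml content

if group_config.get("freeze_automation") == FREEZE_AUTOMATION_YES:
    print("builds/mutations for this group are frozen; coordinate with the group owner")
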
Code Example #8
    def detect_remote_source_branch(self, source_details):
        """Find a configured source branch that exists, or raise DoozerFatalError. Returns branch name and git hash"""
        git_url = source_details["url"]
        branches = source_details["branch"]

        branch = branches["target"]
        fallback_branch = branches.get("fallback", None)
        if self.group_config.use_source_fallback_branch == "always" and fallback_branch:
            # only use the fallback (unless none is given)
            branch, fallback_branch = fallback_branch, None
        elif self.group_config.use_source_fallback_branch == "never":
            # ignore the fallback
            fallback_branch = None
        stage_branch = branches.get("stage", None) if self.stage else None

        if stage_branch:
            self.logger.info(
                'Normal branch overridden by --stage option, using "{}"'.
                format(stage_branch))
            result = self._get_remote_branch_ref(git_url, stage_branch)
            if result:
                return stage_branch, result
            raise DoozerFatalError(
                '--stage option specified and no stage branch named "{}" exists for {}'
                .format(stage_branch, git_url))

        result = self._get_remote_branch_ref(git_url, branch)
        if result:
            return branch, result
        elif not fallback_branch:
            raise DoozerFatalError(
                'Requested target branch {} does not exist and no fallback provided'
                .format(branch))

        self.logger.info(
            'Target branch does not exist in {}, checking fallback branch {}'.
            format(git_url, fallback_branch))
        result = self._get_remote_branch_ref(git_url, fallback_branch)
        if result:
            return fallback_branch, result
        raise DoozerFatalError(
            'Requested fallback branch {} does not exist'.format(fallback_branch))
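
The branch resolution above reads a small structure from the image metadata. An illustrative sketch of what that structure looks like (field values are placeholders; `target` is the preferred branch, `fallback` and `stage` are optional):

source_details = {
    "url": "https://github.com/example-org/example-repo.git",  # placeholder
    "branch": {
        "target": "release-4.9",   # preferred branch
        "fallback": "master",      # only used when the target branch does not exist
        "stage": "stage-4.9",      # only consulted when doozer runs with --stage
    },
}

branches = source_details["branch"]
print(branches["target"], branches.get("fallback"), branches.get("stage"))
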
Code Example #9
async def _rpms_rebase_and_build(runtime: Runtime, version: str, release: str,
                                 embargoed: bool, scratch: bool,
                                 dry_run: bool):
    if version.startswith('v'):
        version = version[1:]

    runtime.initialize(mode='rpms', clone_source=False,
                       clone_distgits=False)  # We will clone distgits later.
    if runtime.local:
        raise DoozerFatalError("Local RPM build is not currently supported.")
    if runtime.group_config.public_upstreams and (release is None or
                                                  not release.endswith(".p?")):
        raise click.BadParameter(
            "You must explicitly specify a `release` ending with `.p?` when there is a public upstream mapping in ocp-build-data."
        )

    runtime.assert_mutation_is_permitted()

    rpms: List[RPMMetadata] = runtime.rpm_metas()
    if not rpms:
        runtime.logger.error("No RPMs found. Check the arguments.")
        exit(0)

    if embargoed:
        for rpm in rpms:
            rpm.private_fix = True

    with runtime.shared_koji_client_session() as koji_api:
        if not koji_api.logged_in:
            koji_api.gssapi_login()

    builder = RPMBuilder(runtime, dry_run=dry_run, scratch=scratch)

    async def _rebase_and_build(rpm: RPMMetadata):
        status = await _rebase_rpm(runtime, builder, rpm, version, release)
        if status != 0:
            return status
        status = await _build_rpm(runtime, builder, rpm)
        return status

    tasks = [asyncio.ensure_future(_rebase_and_build(rpm)) for rpm in rpms]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    failed = [rpms[i].distgit_key for i, r in enumerate(results) if r != 0]
    if failed:
        runtime.logger.error("\n".join(["Build failures:"] + sorted(failed)))
        return 1
    return 0
Code Example #10
def detect_embargoes_in_nvrs(runtime: Runtime, nvrs: List[str]):
    """ Finds embargoes in given NVRs
    :param runtime: the runtime
    :param nvrs: list of build NVRs
    :return: list of Brew build dicts that have embargoed fixes
    """
    runtime.logger.info(f"Fetching {len(nvrs)} builds from Brew...")
    brew_session = runtime.build_retrying_koji_client()
    builds = brew.get_build_objects(nvrs, brew_session)
    for i, b in enumerate(builds):
        if not b:
            raise DoozerFatalError(f"Unable to get {nvrs[i]} from Brew.")
    runtime.logger.info(f"Detecting embargoes for {len(nvrs)} builds...")
    detector = embargo_detector.EmbargoDetector(brew_session, runtime.logger)
    embargoed_build_ids = detector.find_embargoed_builds(builds)
    embargoed_builds = [b for b in builds if b["id"] in embargoed_build_ids]
    return embargoed_builds
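
The last two lines are a straightforward filter: the detector returns the Brew build ids it considers embargoed, and only the matching build dicts are kept. A toy illustration with fabricated build records:

builds = [{"id": 101, "nvr": "foo-1.0-1.el8"}, {"id": 102, "nvr": "bar-2.0-1.el8"}]
embargoed_build_ids = {102}  # pretend the detector flagged this one

embargoed_builds = [b for b in builds if b["id"] in embargoed_build_ids]
print(embargoed_builds)  # -> only bar-2.0-1.el8
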
Code Example #11
    def late_resolve_image(self, distgit_name, add=False):
        """Resolve image and retrieve meta, optionally adding to image_map.
        If image not found, error will be thrown"""

        if distgit_name in self.image_map:
            return self.image_map[distgit_name]

        replace_vars = {}
        if self.group_config.vars:
            replace_vars = self.group_config.vars.primitive()
        data_obj = self.gitdata.load_data(path='images',
                                          key=distgit_name,
                                          replace_vars=replace_vars)
        if not data_obj:
            raise DoozerFatalError(
                'Unable to resolve image metadata for {}'.format(distgit_name))

        meta = ImageMetadata(self, data_obj)
        if add:
            self.image_map[distgit_name] = meta
        return meta
Code Example #12
    def act(self, *args, **kwargs):
        """ Run the modification action

        :param context: A context dict. `context.component_name` is the dist-git repo name,
            and `context.content` is the content of Dockerfile or RPM spec file.
        """
        context = kwargs["context"]
        content = context["content"]
        component_name = context["component_name"]
        match = self.match
        assert (match is not Missing)
        replacement = self.replacement
        assert (replacement is not Missing)
        if replacement is None:  # Nothing follows colon in config yaml; user attempting to remove string
            replacement = ""
        pre = content
        post = pre.replace(match, replacement)
        if post == pre:
            raise DoozerFatalError(
                "{}: Replace ({}->{}) modification did not make a change to the Dockerfile content"
                .format(component_name, match, replacement))
        LOGGER.debug("Performed string replace '%s' -> '%s':\n%s\n" %
                     (match, replacement, post))
        context["result"] = post
Code Example #13
    def resolve_source(self, parent, meta):
        """
        Looks up a source alias and returns a path to the directory containing
        that source. Sources can be specified on the command line, or, failing
        that, in group.yml.
        If a source specified in group.yaml has not been resolved before,
        this method will clone that source to checkout the group's desired
        branch before returning a path to the cloned repo.
        :param parent: Name of parent the source belongs to
        :param meta: The MetaData object to resolve source for
        :return: Returns the source path
        """

        source = meta.config.content.source

        # This allows passing `--source <distgit_key> path` to
        # override any source to something local without it
        # having been configured for an alias
        if self.local and meta.distgit_key in self.source_paths:
            source['alias'] = meta.distgit_key
            if 'git' in source:
                del source['git']

        source_details = None
        if 'git' in source:
            git_url = urlparse.urlparse(source.git.url)
            name = os.path.splitext(os.path.basename(git_url.path))[0]
            alias = '{}_{}'.format(parent, name)
            source_details = dict(source.git)
        elif 'alias' in source:
            alias = source.alias
        else:
            raise DoozerFatalError(
                'Error while processing source for {}'.format(parent))

        self.logger.debug(
            "Resolving local source directory for alias {}".format(alias))
        if alias in self.source_paths:
            self.logger.debug(
                "returning previously resolved path for alias {}: {}".format(
                    alias, self.source_paths[alias]))
            return self.source_paths[alias]

        # Where the source will land, check early so we know if old or new style
        sub_path = '{}{}'.format('global_' if source_details is None else '',
                                 alias)
        source_dir = os.path.join(self.sources_dir, sub_path)

        if not source_details:  # old style alias was given
            if (self.group_config.sources is Missing
                    or alias not in self.group_config.sources):
                raise DoozerFatalError(
                    "Source alias not found in specified sources or in the current group: %s"
                    % alias)
            source_details = self.group_config.sources[alias]

        self.logger.debug(
            "checking for source directory in source_dir: {}".format(
                source_dir))

        # If this source has already been extracted for this working directory
        if os.path.isdir(source_dir):
            # Store so that the next attempt to resolve the source hits the map
            self.source_paths[alias] = source_dir
            self.logger.info(
                "Source '{}' already exists in (skipping clone): {}".format(
                    alias, source_dir))
            return source_dir

        clone_branch, _ = self.detect_remote_source_branch(source_details)

        url = source_details["url"]
        try:
            self.logger.info(
                "Attempting to checkout source '%s' branch %s in: %s" %
                (url, clone_branch, source_dir))
            exectools.cmd_assert(
                # get a little history to enable finding a recent Dockerfile change, but not too much.
                "git clone -b {} --single-branch {} --depth 50 {}".format(
                    clone_branch, url, source_dir),
                retries=3,
                on_retry=["rm", "-rf", source_dir],
            )
        except IOError as e:
            self.logger.info("Unable to checkout branch {}: {}".format(
                clone_branch, e.message))
            raise DoozerFatalError(
                "Error checking out target branch of source '%s' in: %s" %
                (alias, source_dir))

        # Store so that the next attempt to resolve the source hits the map
        self.register_source_alias(alias, source_dir)
        return source_dir
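
The clone itself is a shallow, single-branch `git clone` run through doozer's exectools wrapper with retries. Roughly the same command using only the standard library (the URL, branch, and directory below are placeholders):

import subprocess

url = "https://github.com/example-org/example-repo.git"  # placeholder
clone_branch = "release-4.9"                              # placeholder
source_dir = "/tmp/doozer-sources/example-repo"           # placeholder

# --depth 50 keeps enough history to find a recent Dockerfile change without a full clone.
subprocess.run(
    ["git", "clone", "-b", clone_branch, "--single-branch", "--depth", "50", url, source_dir],
    check=True,
)
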
Code Example #14
    def initialize(self,
                   mode='images',
                   clone_distgits=True,
                   validate_content_sets=False,
                   no_group=False,
                   clone_source=True,
                   disabled=None,
                   config_excludes=None):

        if self.initialized:
            return

        if self.quiet and self.verbose:
            click.echo("Flags --quiet and --verbose are mutually exclusive")
            exit(1)

        # We could mark these as required and the click library would do this for us,
        # but this seems to prevent getting help from the various commands (unless you
        # specify the required parameters). This can probably be solved more cleanly, but TODO
        if not no_group and self.group is None:
            click.echo("Group must be specified")
            exit(1)

        if self.working_dir is None:
            self.working_dir = tempfile.mkdtemp(".tmp", "oit-")
            # This can be set to False by operations which want the working directory to be left around
            self.remove_tmp_working_dir = True
            atexit.register(remove_tmp_working_dir, self)
        else:
            self.working_dir = os.path.abspath(
                os.path.expanduser(self.working_dir))
            if not os.path.isdir(self.working_dir):
                os.makedirs(self.working_dir)

        self.distgits_dir = os.path.join(self.working_dir, "distgits")
        if not os.path.isdir(self.distgits_dir):
            os.mkdir(self.distgits_dir)

        self.distgits_diff_dir = os.path.join(self.working_dir,
                                              "distgits-diffs")
        if not os.path.isdir(self.distgits_diff_dir):
            os.mkdir(self.distgits_diff_dir)

        self.sources_dir = os.path.join(self.working_dir, "sources")
        if not os.path.isdir(self.sources_dir):
            os.mkdir(self.sources_dir)

        if disabled is not None:
            self.load_disabled = disabled

        self.initialize_logging()

        self.init_state()

        if no_group:
            return  # nothing past here should be run without a group

        self.resolve_metadata()

        self.record_log_path = os.path.join(self.working_dir, "record.log")
        self.record_log = open(self.record_log_path, 'a')
        atexit.register(close_file, self.record_log)

        # Directory where brew-logs will be downloaded after a build
        self.brew_logs_dir = os.path.join(self.working_dir, "brew-logs")
        if not os.path.isdir(self.brew_logs_dir):
            os.mkdir(self.brew_logs_dir)

        # Directory for flags between invocations in the same working-dir
        self.flags_dir = os.path.join(self.working_dir, "flags")
        if not os.path.isdir(self.flags_dir):
            os.mkdir(self.flags_dir)

        self.group_dir = self.gitdata.data_dir

        # register the sources
        # For each "--source alias path" on the command line, register its existence with
        # the runtime.
        for r in self.source:
            self.register_source_alias(r[0], r[1])

        if self.sources:
            with open(self.sources, 'r') as sf:
                source_dict = yaml.full_load(sf)
                if not isinstance(source_dict, dict):
                    raise ValueError(
                        '--sources param must be a yaml file containing a single dict.'
                    )
                for key, val in source_dict.items():
                    self.register_source_alias(key, val)

        with Dir(self.group_dir):
            self.group_config = self.get_group_config()
            self.arches = self.group_config.get('arches', ['x86_64'])
            self.repos = Repos(self.group_config.repos, self.arches)
            self.freeze_automation = self.group_config.freeze_automation or FREEZE_AUTOMATION_NO

            if validate_content_sets:
                self.repos.validate_content_sets()

            if self.group_config.name != self.group:
                raise IOError(
                    "Name in group.yml does not match group name. Someone may have copied this group without updating group.yml (make sure to check branch)"
                )

            if self.branch is None:
                if self.group_config.branch is not Missing:
                    self.branch = self.group_config.branch
                    self.logger.info("Using branch from group.yml: %s" %
                                     self.branch)
                else:
                    self.logger.info(
                        "No branch specified either in group.yml or on the command line; all included images will need to specify their own."
                    )
            else:
                self.logger.info("Using branch from command line: %s" %
                                 self.branch)

            scanner = self.group_config.image_build_log_scanner
            if scanner is not Missing:
                # compile regexen and fail early if they don't compile
                regexen = []
                for val in scanner.matches:
                    try:
                        regexen.append(re.compile(val))
                    except Exception as e:
                        raise ValueError(
                            "could not compile image build log regex for group:\n{}\n{}"
                            .format(val, e))
                scanner.matches = regexen

            if self.group_config.brew_tag:
                # setting this overrides tags made out of branches in specific configs
                self.brew_tag = self.group_config.brew_tag

            # Flattens a list like [ 'x', 'y,z' ] into [ 'x', 'y', 'z' ].
            # The click options arrive as tuples; clone them into lists so entries can be removed later.
            def flatten_list(names):
                if not names:
                    return []
                # split csv values
                result = []
                for n in names:
                    result.append(
                        [x for x in n.replace(' ', ',').split(',') if x != ''])
                # flatten result and remove dupes
                return list(set([y for x in result for y in x]))

            def filter_wip(n, d):
                return d.get('mode', 'enabled') in ['wip', 'enabled']

            def filter_enabled(n, d):
                return d.get('mode', 'enabled') == 'enabled'

            def filter_disabled(n, d):
                return d.get('mode', 'enabled') in ['enabled', 'disabled']

            exclude_keys = flatten_list(self.exclude)
            image_ex = list(exclude_keys)
            rpm_ex = list(exclude_keys)
            image_keys = flatten_list(self.images)
            rpm_keys = flatten_list(self.rpms)

            filter_func = None
            if self.load_wip and self.load_disabled:
                pass  # use no filter, load all
            elif self.load_wip:
                filter_func = filter_wip
            elif self.load_disabled:
                filter_func = filter_disabled
            else:
                filter_func = filter_enabled

            replace_vars = {}
            if self.group_config.vars:
                replace_vars = self.group_config.vars.primitive()

            if config_excludes:
                excludes = self.group_config.get(config_excludes, {})
                image_ex.extend(excludes.get('images', []))
                rpm_ex.extend(excludes.get('rpms', []))

            # pre-load the image data to get the names for all images
            # eventually we can use this to allow loading images by
            # name or distgit. For now this is used elsewhere
            image_name_data = self.gitdata.load_data(path='images')

            for img in image_name_data.itervalues():
                name = img.data.get('name')
                short_name = name.split('/')[1]
                self.image_name_map[name] = img.key
                self.image_name_map[short_name] = img.key

            image_data = self.gitdata.load_data(
                path='images',
                keys=image_keys,
                exclude=image_ex,
                replace_vars=replace_vars,
                filter_funcs=None if len(image_keys) else filter_func)

            try:
                rpm_data = self.gitdata.load_data(
                    path='rpms',
                    keys=rpm_keys,
                    exclude=rpm_ex,
                    replace_vars=replace_vars,
                    filter_funcs=None if len(rpm_keys) else filter_func)
            except gitdata.GitDataPathException:
                # some older versions have no RPMs, that's ok.
                rpm_data = {}

            missed_include = set(image_keys +
                                 rpm_keys) - set(image_data.keys() +
                                                 rpm_data.keys())
            if len(missed_include) > 0:
                raise DoozerFatalError(
                    'The following images or rpms were either missing or filtered out: {}'
                    .format(', '.join(missed_include)))

            if mode in ['images', 'both']:
                for i in image_data.itervalues():
                    metadata = ImageMetadata(self, i)
                    self.image_map[metadata.distgit_key] = metadata
                if not self.image_map:
                    self.logger.warning(
                        "No image metadata directories found for given options within: {}"
                        .format(self.group_dir))

                for image in self.image_map.itervalues():
                    image.resolve_parent()

                # now that ancestry is defined, make sure no cyclic dependencies
                for image in self.image_map.itervalues():
                    for child in image.children:
                        if image.is_ancestor(child):
                            raise DoozerFatalError(
                                '{} cannot be both a parent and dependent of {}'
                                .format(child.distgit_key, image.distgit_key))

                self.generate_image_tree()

            if mode in ['rpms', 'both']:
                for r in rpm_data.itervalues():
                    metadata = RPMMetadata(self, r, clone_source=clone_source)
                    self.rpm_map[metadata.distgit_key] = metadata
                if not self.rpm_map:
                    self.logger.warning(
                        "No rpm metadata directories found for given options within: {}"
                        .format(self.group_dir))

        # Make sure that the metadata is not asking us to check out the same exact distgit & branch.
        # This would almost always indicate someone has checked in duplicate metadata into a group.
        no_collide_check = {}
        for meta in self.rpm_map.values() + self.image_map.values():
            key = '{}/{}/#{}'.format(meta.namespace, meta.name, meta.branch())
            if key in no_collide_check:
                raise IOError(
                    'Complete duplicate distgit & branch; something wrong with metadata: {} from {} and {}'
                    .format(key, meta.config_filename,
                            no_collide_check[key].config_filename))
            no_collide_check[key] = meta

        # Read in the streams definition for this group if one exists
        streams = self.gitdata.load_data(key='streams')
        if streams:
            self.streams = Model(self.gitdata.load_data(key='streams').data)

        if clone_distgits:
            self.clone_distgits()

        self.initialized = True
Code Example #15
    def __init__(self, **kwargs):
        # initialize defaults in case no value is given
        self.verbose = False
        self.quiet = False
        self.load_wip = False
        self.load_disabled = False
        self.data_path = None
        self.data_dir = None
        self.brew_tag = None
        self.latest_parent_version = False

        for key, val in kwargs.items():
            self.__dict__[key] = val

        if self.latest_parent_version:
            self.ignore_missing_base = True

        self._remove_tmp_working_dir = False
        self.group_config = None

        # If source needs to be cloned by oit directly, the directory in which it will be placed.
        self.sources_dir = None

        self.distgits_dir = None

        self.record_log = None
        self.record_log_path = None

        self.debug_log_path = None

        self.brew_logs_dir = None

        self.flags_dir = None

        # Map of dist-git repo name -> ImageMetadata object. Populated when group is set.
        self.image_map = {}

        # Map of dist-git repo name -> RPMMetadata object. Populated when group is set.
        self.rpm_map = {}

        # Map of source code repo aliases (e.g. "ose") to a path on the filesystem where it has been cloned.
        # See registry_repo.
        self.source_paths = {}

        # Map of stream alias to image name.
        self.stream_alias_overrides = {}

        self.initialized = False

        # Will be loaded with the streams.yml Model
        self.streams = {}

        # Create a "uuid" which will be used in FROM fields during updates
        self.uuid = datetime.datetime.now().strftime("%Y%m%d.%H%M%S")

        # Optionally available if self.fetch_rpms_for_tag() is called
        self.rpm_list = None
        self.rpm_search_tree = None

        # Used for image build ordering
        self.image_tree = {}
        self.image_order = []
        # allows mapping from name or distgit to meta
        self.image_name_map = {}

        # holds untouched group config
        self.raw_group_config = {}

        # Used to capture missing packages for 4.x build
        self.missing_pkgs = set()

        # Whether to prevent builds for this group. Defaults to 'no'.
        self.freeze_automation = FREEZE_AUTOMATION_NO

        self.rhpkg_config_lst = []
        if self.rhpkg_config:
            if not os.path.isfile(self.rhpkg_config):
                raise DoozerFatalError(
                    '--rhpkg-config option given is not a valid file! {}'.
                    format(self.rhpkg_config))
            self.rhpkg_config = ' --config {} '.format(self.rhpkg_config)
            self.rhpkg_config_lst = self.rhpkg_config.split()
        else:
            self.rhpkg_config = ''
Code Example #16
File: rpmcfg.py  Project: tnozicka/doozer
    def build_rpm(self, version, release, terminate_event, scratch=False, retries=3, local=False, dry_run=False):
        """
        Builds a package using tito release.

        If the source repository has the necessary tito configuration in .tito, the build can be
        configured to use that in the standard `tito tag` flow.

        The default flow imitates `tito tag`, but instead of creating a release commit and tagging that,
        the tag is added to the existing commit and a release commit is created afterward. In
        this way, the tag can be pushed back to the source, but pushing the commit is optional.
        [lmeyer 2019-04-01] I looked into customizing the tito tagger to support this flow.
        It was not going to be pretty, and even so would probably require doozer to do
        some modification of the spec first. It seems best to limit the craziness to doozer.

        By default, the tag is pushed, then it and the commit are removed locally after the build.
        But optionally the commit can be pushed before the build, so that the actual commit released is in the source.
        """
        if local:
            raise DoozerFatalError("Local RPM build is not currently supported.")

        # TODO: adjust Doozer's execution for private build
        if self.private_fix:
            self.logger.warning("Source contains embargoed fixes.")

        with self.runtime.get_named_semaphore(self.source_path, is_dir=True):

            with Dir(self.source_path):
                # Remember what we were at before tito activity. We may need to revert
                # to this if we don't push changes back to origin.
                self.pre_init_sha = exectools.cmd_assert('git rev-parse HEAD')[0].strip()

            pval = '.p0'
            if self.runtime.group_config.public_upstreams:
                if not release.endswith(".p?"):
                    raise ValueError(f"'release' must end with '.p?' for an rpm with a public upstream but its actual value is {release}")
                pval = ".p1" if self.private_fix else ".p0"

            if release.endswith(".p?"):
                release = release[:-3]  # strip .p?
                release += pval

            self.set_nvr(version, release)
            self.tito_setup()
            self.update_spec()
            self.commit_changes(scratch)
            action = "build_rpm"
            record = {
                "specfile": self.specfile,
                "source_head": self.source_head,
                "distgit_key": self.distgit_key,
                "rpm": self.rpm_name,
                "version": self.version,
                "release": self.release,
                "message": "Unknown failure",
                "status": -1,
                # Status defaults to failure until explicitly set by success. This handles raised exceptions.
            }

            try:
                def wait(n):
                    self.logger.info("Async error in rpm build thread [attempt #{}]: {}".format(n + 1, self.qualified_name))
                    # Brew does not handle an immediate retry correctly, wait
                    # before trying another build, terminating if interrupted.
                    if terminate_event.wait(timeout=5 * 60):
                        raise KeyboardInterrupt()
                try:
                    exectools.retry(
                        retries=3, wait_f=wait,
                        task_f=lambda: self._build_rpm(
                            scratch, record, terminate_event, dry_run))
                except exectools.RetryException as err:
                    self.logger.error(str(err))
                    return (self.distgit_key, False)

                record["message"] = "Success"
                record["status"] = 0
                self.build_status = True

            except (Exception, KeyboardInterrupt):
                tb = traceback.format_exc()
                record["message"] = "Exception occurred:\n{}".format(tb)
                self.logger.info("Exception occurred during build:\n{}".format(tb))
                # This is designed to fall through to finally. Since this method is designed to be
                # threaded, we should not throw an exception; instead return False.
            finally:
                self.runtime.add_record(action, **record)

            self.post_build(scratch)
            return self.distgit_key, self.build_status
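
When a group has public upstreams, the `.p?` placeholder at the end of the release is resolved to `.p1` for embargoed (private) fixes and `.p0` otherwise. A small sketch of just that substitution:

def resolve_release(release: str, private_fix: bool) -> str:
    pval = ".p1" if private_fix else ".p0"
    if release.endswith(".p?"):
        release = release[:-3] + pval  # strip ".p?" and append the resolved suffix
    return release


print(resolve_release("202107070000.p?", private_fix=False))  # 202107070000.p0
print(resolve_release("202107070000.p?", private_fix=True))   # 202107070000.p1
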
Code Example #17
def release_gen_payload(runtime: Runtime, is_name: Optional[str],
                        is_namespace: Optional[str],
                        organization: Optional[str], repository: Optional[str],
                        exclude_arch: Tuple[str, ...], skip_gc_tagging: bool,
                        emergency_ignore_issues: bool):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files are SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files are YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes, this command generates mirroring yaml files
after the arch-specific files have been generated. The yaml files
include the names of the generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.

## Validation ##

Additionally we want to check that the following conditions are true for each
imagestream being updated:

* For all architectures built, RHCOS builds must have matching versions of any
  unshipped RPM they include (per-entry os metadata - the set of RPMs may differ
  between arches, but versions should not).
* Any RPMs present in images (including machine-os-content) from unshipped RPM
  builds included in one of our candidate tags must exactly version-match the
  latest RPM builds in those candidate tags (ONLY; we never flag what we don't
  directly ship.)

These checks (and likely more in the future) should run and any failures should
be listed in brief via a "release.openshift.io/inconsistency" annotation on the
relevant image istag (these are publicly visible; ref. https://bit.ly/37cseC1)
and in more detail in state.yaml. The release-controller, per ART-2195, will
read and propagate/expose this annotation in its display of the release image.
    """
    runtime.initialize(mode='both',
                       clone_distgits=False,
                       clone_source=False,
                       prevent_cloning=True)

    if runtime.assembly not in {
            None, "stream", "test"
    } and runtime.assembly not in runtime.releases_config.releases:
        raise DoozerFatalError(
            f"Assembly '{runtime.assembly}' is not explicitly defined.")

    logger = runtime.logger
    brew_session = runtime.build_retrying_koji_client()

    base_imagestream_name: str = is_name if is_name else assembly_imagestream_base_name(runtime)
    base_istream_namespace: str = is_namespace if is_namespace else default_imagestream_namespace_base_name()

    if runtime.assembly and runtime.assembly != 'stream' and 'art-latest' in base_imagestream_name:
        raise ValueError(
            'The art-latest imagestreams should not be used for an assembly other than "stream"'
        )

    logger.info(
        f'Collecting latest information associated with the assembly: {runtime.assembly}'
    )
    assembly_inspector = AssemblyInspector(runtime, brew_session)
    logger.info('Checking for mismatched siblings...')
    mismatched_siblings = PayloadGenerator.find_mismatched_siblings(
        assembly_inspector.get_group_release_images().values())

    # A list of strings that denote inconsistencies across all payloads generated
    assembly_issues: List[AssemblyIssue] = list()

    for mismatched_bbii, sibling_bbi in mismatched_siblings:
        mismatch_issue = AssemblyIssue(
            f'{mismatched_bbii.get_nvr()} was built from a different upstream source commit ({mismatched_bbii.get_source_git_commit()[:7]}) than one of its siblings {sibling_bbi.get_nvr()} from {sibling_bbi.get_source_git_commit()[:7]}',
            component=mismatched_bbii.get_image_meta().distgit_key,
            code=AssemblyIssueCode.MISMATCHED_SIBLINGS)
        assembly_issues.append(mismatch_issue)

    report = dict()
    report['non_release_images'] = [
        image_meta.distgit_key
        for image_meta in runtime.get_non_release_image_metas()
    ]
    report['release_images'] = [
        image_meta.distgit_key
        for image_meta in runtime.get_for_release_image_metas()
    ]
    report['missing_image_builds'] = [
        dgk
        for (dgk, ii) in assembly_inspector.get_group_release_images().items()
        if ii is None
    ]  # A list of metas where the assembly did not find a build

    if runtime.assembly_type is AssemblyTypes.STREAM:
        # Only nightlies have the concept of private and public payloads
        privacy_modes = [False, True]
    else:
        privacy_modes = [False]

    # Structure to record rhcos builds we use so that they can be analyzed for inconsistencies
    targeted_rhcos_builds: Dict[bool, List[RHCOSBuildInspector]] = {
        False: [],
        True: []
    }
    """
    Collect a list of builds we need to tag in order to prevent garbage collection.
    Note: we also use this list to warm up caches, so don't wrap this section
    with `if not skip_gc_tagging`.

    To prevent garbage collection for custom
    assemblies (which won't normally be released via errata tool, triggering
    the traditional garbage collection prevention), we must tag these builds
    explicitly to prevent their GC. It is necessary to prevent GC, because
    we want to be able to build custom releases off of custom releases, and
    so on. If we lose images and builds for custom releases in brew due
    to garbage collection, we will not be able to construct derivative
    release payloads.
    """
    # This set of builds associated with the group/assembly will be used to warm up caches
    assembly_build_ids: Set[int] = set()

    # Maps (build_id, tag) tuple to multicall task to list tags
    list_tags_tasks: Dict[Tuple[int, str], Any] = dict()
    with runtime.pooled_koji_client_session() as pcs:
        with pcs.multicall(strict=True) as m:
            for bbii in assembly_inspector.get_group_release_images().values():
                if bbii:
                    build_id = bbii.get_brew_build_id()
                    assembly_build_ids.add(build_id)  # Collect up build ids for cache warm up
                    hotfix_tag = bbii.get_image_meta().hotfix_brew_tag()
                    list_tags_tasks[(build_id, hotfix_tag)] = m.listTags(build=build_id)

            # RPMs can build for multiple versions of RHEL. For example, a single RPM
            # metadata can target 7 & 8.
            # For each rhel version targeted by our RPMs, build a list of RPMs
            # appropriate for the RHEL version with respect to the group/assembly.
            # Maps rhel version -> bool indicating whether we have processed that rhel version
            rhel_version_scanned_for_rpms: Dict[int, bool] = dict()
            for rpm_meta in runtime.rpm_metas():
                for el_ver in rpm_meta.determine_rhel_targets():
                    if el_ver in rhel_version_scanned_for_rpms:
                        # We've already processed this RHEL version.
                        continue
                    hotfix_tag = runtime.get_default_hotfix_brew_tag(
                        el_target=el_ver)
                    # Otherwise, query the assembly for this rhel version now.
                    for dgk, rpm_build_dict in assembly_inspector.get_group_rpm_build_dicts(
                            el_ver=el_ver).items():
                        if not rpm_build_dict:
                            # RPM not built for this rhel version
                            continue
                        build_id = rpm_build_dict['id']
                        assembly_build_ids.add(build_id)  # For cache warm up later.
                        list_tags_tasks[(build_id, hotfix_tag)] = m.listTags(build=build_id)
                    # Record that we are done for this rhel version.
                    rhel_version_scanned_for_rpms[el_ver] = True

    # Tasks should now contain tag list information for all builds associated with this assembly.
    # and assembly_build_ids should contain ids for builds that should be cached.

    # We have a list of image and RPM builds associated with this assembly.
    # Tag them unless we have been told not to from the command line.
    if runtime.assembly_type != AssemblyTypes.STREAM and not skip_gc_tagging:
        with runtime.shared_koji_client_session() as koji_api:
            koji_api.gssapi_login()  # Tagging requires authentication
            with koji_api.multicall() as m:
                for tup, list_tag_task in list_tags_tasks.items():
                    build_id = tup[0]
                    desired_tag = tup[1]
                    current_tags = [
                        tag_entry['name'] for tag_entry in list_tag_task.result
                    ]
                    if desired_tag not in current_tags:
                        # The hotfix tag is missing, so apply it.
                        runtime.logger.info(
                            f'Adding tag {desired_tag} to build: {build_id} to prevent garbage collection.'
                        )
                        m.tagBuild(desired_tag, build_id)

    with runtime.shared_build_status_detector() as bsd:
        bsd.populate_archive_lists(assembly_build_ids)
        bsd.find_shipped_builds(assembly_build_ids)
    """
    Make sure that RPMs belonging to this assembly/group are consistent with the assembly definition.
    """
    for rpm_meta in runtime.rpm_metas():
        issues = assembly_inspector.check_group_rpm_package_consistency(
            rpm_meta)
        assembly_issues.extend(issues)
    """
    If this is a stream assembly, images which are not using the latest builds should not reach
    the release controller. Other assemblies are meant to be constructed from non-latest.
    """
    if runtime.assembly == 'stream':
        for dgk, build_inspector in assembly_inspector.get_group_release_images().items():
            if build_inspector:
                non_latest_rpm_nvrs = build_inspector.find_non_latest_rpms()
                dgk = build_inspector.get_image_meta().distgit_key
                for installed_nvr, newest_nvr in non_latest_rpm_nvrs:
                    # This indicates an issue with scan-sources or that an image is no longer successfully building.
                    # Impermissible as this speaks to a potentially deeper issue of images not being rebuilt
                    outdated_issue = AssemblyIssue(
                        f'Found outdated RPM ({installed_nvr}) installed in {build_inspector.get_nvr()} when {newest_nvr} was available',
                        component=dgk,
                        code=AssemblyIssueCode.OUTDATED_RPMS_IN_STREAM_BUILD)
                    assembly_issues.append(
                        outdated_issue)  # Add to overall issues
    """
    Make sure image build selected by this assembly/group are consistent with the assembly definition.
    """
    for dgk, bbii in assembly_inspector.get_group_release_images().items():
        if bbii:
            issues = assembly_inspector.check_group_image_consistency(bbii)
            assembly_issues.extend(issues)

    for arch in runtime.arches:
        if arch in exclude_arch:
            logger.info(f'Excluding payload files architecture: {arch}')
            continue

        # Whether private or public, the assembly's canonical payload content is the same.
        # Key of this dict is the release payload tag name
        entries: Dict[str, PayloadGenerator.PayloadEntry] = PayloadGenerator.find_payload_entries(
            assembly_inspector, arch, f'quay.io/{organization}/{repository}')

        for tag, payload_entry in entries.items():
            if payload_entry.image_meta:
                # We already stored inconsistencies for each image_meta; look them up if there are any.
                payload_entry.issues.extend(
                    filter(lambda ai: ai.component == payload_entry.image_meta.distgit_key,
                           assembly_issues))
            elif payload_entry.rhcos_build:
                assembly_issues.extend(
                    assembly_inspector.check_rhcos_issues(
                        payload_entry.rhcos_build))
                payload_entry.issues.extend(
                    filter(lambda ai: ai.component == 'rhcos',
                           assembly_issues))
                if runtime.assembly == 'stream':
                    # For stream alone, we want to enforce that the very latest RPMs are installed.
                    non_latest_rpm_nvrs = payload_entry.rhcos_build.find_non_latest_rpms()
                    for installed_nvr, newest_nvr in non_latest_rpm_nvrs:
                        assembly_issues.append(
                            AssemblyIssue(
                                f'Found outdated RPM ({installed_nvr}) installed in {payload_entry.rhcos_build} when {newest_nvr} is available',
                                component='rhcos',
                                code=AssemblyIssueCode.OUTDATED_RPMS_IN_STREAM_BUILD))
            else:
                raise IOError(f'Unsupported PayloadEntry: {payload_entry}')

        # Save the default SRC=DEST input to a file for syncing by 'oc image mirror'. Why is
        # there no '-priv'? The true images for the assembly are what we are syncing -
        # it is what we update in the imagestream that defines whether the image will be
        # part of a public release.
        # Prevents writing the same destination twice (not supported by oc)
        dests: Set[str] = set()
        with io.open(f"src_dest.{arch}", "w+", encoding="utf-8") as out_file:
            for payload_entry in entries.values():
                if not payload_entry.archive_inspector:
                    # Nothing to mirror (e.g. machine-os-content)
                    continue
                if payload_entry.dest_pullspec in dests:
                    # Don't write the same destination twice.
                    continue
                out_file.write(
                    f"{payload_entry.archive_inspector.get_archive_pullspec()}={payload_entry.dest_pullspec}\n"
                )
                dests.add(payload_entry.dest_pullspec)

        for private_mode in privacy_modes:
            logger.info(
                f'Building payload files for architecture: {arch}; private: {private_mode}'
            )

            file_suffix = arch + '-priv' if private_mode else arch
            with io.open(f"image_stream.{file_suffix}.yaml",
                         "w+",
                         encoding="utf-8") as out_file:
                istags: List[Dict] = []
                for payload_tag_name, payload_entry in entries.items():
                    if payload_entry.build_inspector and payload_entry.build_inspector.is_under_embargo(
                    ) and private_mode is False:
                        # Don't send this istag update to the public release controller
                        continue
                    istags.append(
                        PayloadGenerator.build_payload_istag(
                            payload_tag_name, payload_entry))

                imagestream_name, imagestream_namespace = payload_imagestream_name_and_namespace(
                    base_imagestream_name, base_istream_namespace, arch,
                    private_mode)

                istream_spec = PayloadGenerator.build_payload_imagestream(
                    imagestream_name, imagestream_namespace, istags,
                    assembly_issues)
                yaml.safe_dump(istream_spec,
                               out_file,
                               indent=2,
                               default_flow_style=False)

    # Now make sure that all of the RHCOS builds contain consistent RPMs
    for private_mode in privacy_modes:
        rhcos_builds = targeted_rhcos_builds[private_mode]
        rhcos_inconsistencies: Dict[str, List[str]] = PayloadGenerator.find_rhcos_build_rpm_inconsistencies(rhcos_builds)
        if rhcos_inconsistencies:
            assembly_issues.append(
                AssemblyIssue(
                    f'Found RHCOS inconsistencies in builds {targeted_rhcos_builds}: {rhcos_inconsistencies}',
                    component='rhcos',
                    code=AssemblyIssueCode.INCONSISTENT_RHCOS_RPMS))

    # If the assembly claims to have reference nightlies, assert that our payload
    # matches them exactly.
    nightly_match_issues = PayloadGenerator.check_nightlies_consistency(
        assembly_inspector)
    if nightly_match_issues:
        assembly_issues.extend(nightly_match_issues)

    assembly_issues_report: Dict[str, List[Dict]] = dict()
    report['assembly_issues'] = assembly_issues_report

    overall_permitted = True
    for ai in assembly_issues:
        permitted = assembly_inspector.does_permit(ai)
        overall_permitted &= permitted  # If anything is not permitted, exit with an error
        assembly_issues_report.setdefault(ai.component, []).append({
            'code': ai.code.name,
            'msg': ai.msg,
            'permitted': permitted
        })

    report['viable'] = overall_permitted

    print(yaml.dump(report, default_flow_style=False, indent=2))
    if not overall_permitted:
        red_print(
            'DO NOT PROCEED WITH THIS ASSEMBLY PAYLOAD -- not all detected issues are permitted.',
            file=sys.stderr)
        if not emergency_ignore_issues:
            exit(1)
    exit(0)
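
A minimal sketch of the permit-aggregation step above, using hypothetical stand-in
objects instead of doozer's real AssemblyIssue/AssemblyInspector classes: each issue
is grouped into the report by component, and any single unpermitted issue flips the
overall 'viable' flag that drives the exit code.

from types import SimpleNamespace

# Hypothetical issues; the real command collects these from AssemblyInspector
# checks and asks assembly_inspector.does_permit(issue) for each one.
issues = [
    SimpleNamespace(component='rhcos', code='OUTDATED_RPMS_IN_STREAM_BUILD',
                    msg='Found outdated RPM in RHCOS build', permitted=True),
    SimpleNamespace(component='openshift-cli', code='IMPERMISSIBLE',
                    msg='Image built from an unexpected commit', permitted=False),
]

report = {'assembly_issues': {}, 'viable': True}
for ai in issues:
    report['viable'] = report['viable'] and ai.permitted
    report['assembly_issues'].setdefault(ai.component, []).append(
        {'code': ai.code, 'msg': ai.msg, 'permitted': ai.permitted})

print(report['viable'])  # False here -> the command would exit(1) unless issues are emergency-ignored
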
Code example #18
0
def release_gen_payload(runtime, is_name, is_namespace, organization, repository, event_id):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams. Files are generated for each arch
defined in ocp-build-data for a version, as well as a final file for
manifest-lists.

One set of files are SRC=DEST mirroring definitions for 'oc image
mirror'. They define what source images we will sync to which
destination repos, and what the mirrored images will be labeled as.

The other set of files are YAML image stream tags for 'oc
apply'. Those are applied to an openshift cluster to define "release
streams". When they are applied the release controller notices the
update and begins generating a new payload with the images tagged in
the image stream.

For automation purposes, this command generates a mirroring YAML file
after the arch-specific files have been generated. The YAML file
includes the names of the generated content.

You may provide the namespace and base name for the image streams, or defaults
will be used. The generated files will append the -arch and -priv suffixes to
the given name and namespace as needed.

The ORGANIZATION and REPOSITORY options are combined into
ORGANIZATION/REPOSITORY when preparing for mirroring.

Generate files for mirroring from registry-proxy (OSBS storage) to our
quay registry:

\b
    $ doozer --group=openshift-4.2 release:gen-payload \\
        --is-name=4.2-art-latest

Note that if you use -i to include specific images, you should also include
openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
is used automatically as a stand-in for images when an arch does not build
that particular tag.
    """
    runtime.initialize(clone_distgits=False, config_excludes='non_release')
    orgrepo = "{}/{}".format(organization, repository)
    cmd = runtime.command
    runtime.state[cmd] = dict(state.TEMPLATE_IMAGE)
    lstate = runtime.state[cmd]  # get local convenience copy

    if not is_name:
        is_name = default_is_base_name(runtime.get_minor_version())
    if not is_namespace:
        is_namespace = default_is_base_namespace()

    images = [i for i in runtime.image_metas()]
    lstate['total'] = len(images)

    no_build_items = []
    invalid_name_items = []

    payload_images = []
    for image in images:
        # Per clayton:
        """Tim Bielawa: note to self: is only for `ose-` prefixed images
        Clayton Coleman: Yes, Get with the naming system or get out of town
        """
        if image.is_payload:
            if not image.image_name_short.startswith("ose-"):
                invalid_name_items.append(image.image_name_short)
                red_print("NOT adding to IS (does not meet name/version conventions): {}".format(image.image_name_short))
                continue
            else:
                payload_images.append(image)

    runtime.logger.info("Fetching latest image builds from Brew...")
    tag_component_tuples = [(image.candidate_brew_tag(), image.get_component_name()) for image in payload_images]
    brew_session = runtime.build_retrying_koji_client()
    latest_builds = brew.get_latest_builds(tag_component_tuples, "image", event_id, brew_session)
    latest_builds = [builds[0] if builds else None for builds in latest_builds]  # flatten the data structure

    runtime.logger.info("Fetching image archives...")
    build_ids = [b["id"] if b else 0 for b in latest_builds]
    archives_list = brew.list_archives_by_builds(build_ids, "image", brew_session)

    mismatched_siblings = find_mismatched_siblings(payload_images, latest_builds, archives_list, runtime.logger, lstate)

    embargoed_build_ids = set()  # a set of private image build ids
    if runtime.group_config.public_upstreams:
        # looking for embargoed image builds
        detector = embargo_detector.EmbargoDetector(brew_session, runtime.logger)
        for index, archive_list in enumerate(archives_list):
            if build_ids[index]:
                detector.archive_lists[build_ids[index]] = archive_list  # store to EmbargoDetector cache to limit Brew queries
        suspects = [b for b in latest_builds if b]
        embargoed_build_ids = detector.find_embargoed_builds(suspects)

    runtime.logger.info("Creating mirroring lists...")

    # These will map[arch] -> map[image_name] -> { version: version, release: release, image_src: image_src }
    mirroring = {}
    for i, image in enumerate(payload_images):
        latest_build = latest_builds[i]
        archives = archives_list[i]
        error = None
        if image.distgit_key in mismatched_siblings:
            error = "Siblings built from different commits"
        elif not (latest_build and archives):  # build or archive doesn't exist
            error = f"Unable to find build for: {image.image_name_short}"
            no_build_items.append(image.image_name_short)
        else:
            for archive in archives:
                arch = archive["arch"]
                pullspecs = archive["extra"]["docker"]["repositories"]
                if not pullspecs or ":" not in pullspecs[-1]:  # in case of no pullspecs or invalid format
                    error = f"Unable to find pullspecs for: {image.image_name_short}"
                    red_print(error, file=sys.stderr)
                    state.record_image_fail(lstate, image, error, runtime.logger)
                    break
                # The tag that will be used in the imagestreams
                tag_name = image.image_name_short
                tag_name = tag_name[4:] if tag_name.startswith("ose-") else tag_name  # it _should_ but... to be safe
                digest = archive["extra"]['docker']['digests']['application/vnd.docker.distribution.manifest.v2+json']
                if not digest.startswith("sha256:"):  # It should start with sha256: for now. Let's raise an error if this changes.
                    raise ValueError(f"Received unrecognized digest {digest} for image {pullspecs[-1]}")
                mirroring_value = {'version': latest_build["version"], 'release': latest_build["release"], 'image_src': pullspecs[-1], 'digest': digest}
                embargoed = latest_build["id"] in embargoed_build_ids  # when public_upstreams are not configured, this is always false
                if not embargoed:  # exclude embargoed images from the ocp[-arch] imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the public mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(arch, {})[tag_name] = mirroring_value
                else:
                    red_print(f"Found embargoed image {pullspecs[-1]}")
                if runtime.group_config.public_upstreams:
                    # when public_upstreams are configured, both embargoed and non-embargoed images should be included in the ocp[-arch]-priv imagestreams
                    runtime.logger.info(f"Adding {arch} image {pullspecs[-1]} to the private mirroring list with imagestream tag {tag_name}...")
                    mirroring.setdefault(f"{arch}-priv", {})[tag_name] = mirroring_value
        if not error:
            state.record_image_success(lstate, image)
        else:
            red_print(error, file=sys.stderr)
            state.record_image_fail(lstate, image, error, runtime.logger)

    for key in mirroring:
        private = key.endswith("-priv")
        arch = key[:-5] if private else key  # strip `-priv` suffix

        mirror_filename = 'src_dest.{}'.format(key)
        imagestream_filename = 'image_stream.{}'.format(key)
        target_is_name, target_is_namespace = is_name_and_space(is_name, is_namespace, arch, private)

        def build_dest_name(tag_name):
            entry = mirroring[key][tag_name]
            tag = entry["digest"].replace(":", "-")  # sha256:abcdef -> sha256-abcdef
            return f"quay.io/{orgrepo}:{tag}"

        # Save the default SRC=DEST 'oc image mirror' input to a file for
        # later.
        with io.open(mirror_filename, 'w+', encoding="utf-8") as out_file:
            for tag_name in mirroring[key]:
                dest = build_dest_name(tag_name)
                out_file.write("{}={}\n".format(mirroring[key][tag_name]['image_src'], dest))

        with io.open("{}.yaml".format(imagestream_filename), 'w+', encoding="utf-8") as out_file:
            # Add a tag spec to the image stream. The name of each tag
            # spec does not include the 'ose-' prefix. This keeps them
            # consistent between OKD and OCP

            # Template Base Image Stream object.
            tag_list = []
            isb = {
                'kind': 'ImageStream',
                'apiVersion': 'image.openshift.io/v1',
                'metadata': {
                    'name': target_is_name,
                    'namespace': target_is_namespace,
                },
                'spec': {
                    'tags': tag_list,
                }
            }

            for tag_name in mirroring[key]:
                tag_list.append({
                    'name': tag_name,
                    'from': {
                        'kind': 'DockerImage',
                        'name': build_dest_name(tag_name)
                    }
                })

            # mirroring rhcos
            runtime.logger.info(f"Getting latest RHCOS pullspec for {target_is_name}...")
            mosc_istag = _latest_mosc_istag(runtime, arch, private)
            if mosc_istag:
                tag_list.append(mosc_istag)

            # Not all images are built for non-x86 arches (e.g. kuryr), but they
            # may be mentioned in image references. Thus, make sure there is a tag
            # for every tag we find in x86_64 and provide just a dummy image.
            if 'cli' not in mirroring[key]:  # `cli` serves as the dummy image for the replacement
                if runtime.group_config.public_upstreams and not private:  # If cli is embargoed, it is expected that cli is missing in any non *-priv imagestreams.
                    runtime.logger.warning(f"Unable to find cli tag from {key} imagestream. Is `cli` image embargoed?")
                else:  # if CVE embargo support is disabled, or the "cli" image is also missing in the *-priv namespaces, raise an error.
                    raise DoozerFatalError('A dummy image is required for arch {}, but unable to find the cli tag for this arch'.format(arch))
            else:
                extra_tags = mirroring['x86_64-priv' if private else 'x86_64'].keys() - mirroring[key].keys()
                for tag_name in extra_tags:
                    yellow_print('Unable to find tag {} for arch {} ; substituting cli image'.format(tag_name, arch))
                    tag_list.append({
                        'name': tag_name,
                        'from': {
                            'kind': 'DockerImage',
                            'name': build_dest_name('cli')  # cli is always built and is harmless
                        }
                    })

            yaml.safe_dump(isb, out_file, indent=2, default_flow_style=False)

    if no_build_items:
        yellow_print("No builds found for:")
        for img in sorted(no_build_items):
            click.echo("   {}".format(img))

    if invalid_name_items:
        yellow_print("Images skipped due to invalid naming:")
        for img in sorted(invalid_name_items):
            click.echo("   {}".format(img))

    if mismatched_siblings:
        yellow_print("Images skipped due to siblings mismatch:")
        for img in sorted(mismatched_siblings):
            click.echo("   {}".format(img))
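
As a rough illustration of the SRC=DEST lines this command writes for 'oc image mirror',
here is a minimal, self-contained sketch of the digest-based destination naming; the
org/repo and pullspec values are made up for the example and are not real build data.

# Hypothetical inputs; in the command above they come from Brew image archives
# and from the ORGANIZATION/REPOSITORY options.
orgrepo = "example-org/example-repo"
entry = {
    'image_src': 'registry.example.com/rh-osbs/openshift-ose-cli:v4.2.0-202001010000',
    'digest': 'sha256:0123abcd',
}

def build_dest_name(entry, orgrepo):
    # The digest becomes the destination tag: sha256:0123abcd -> sha256-0123abcd
    tag = entry['digest'].replace(':', '-')
    return f"quay.io/{orgrepo}:{tag}"

# One SRC=DEST line, as written to the src_dest.* file consumed by `oc image mirror`
print(f"{entry['image_src']}={build_dest_name(entry, orgrepo)}")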