Example #1
def test_is_scratch_build(build_json, scratch):
    flexmock(util).should_receive('get_build_json').and_return(build_json)
    if scratch is None:
        with pytest.raises(KeyError):
            is_scratch_build()
    else:
        assert is_scratch_build() == scratch
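
The build_json and scratch values above come from a pytest parametrization that the listing does not show. A plausible decorator for the test, assuming the usual OpenShift build JSON layout in which is_scratch_build() reads metadata.labels.scratch; the exact fixture values are an assumption, not part of the original example:

import pytest
from flexmock import flexmock

from atomic_reactor import util
from atomic_reactor.util import is_scratch_build

# Hypothetical parametrization for test_is_scratch_build above
@pytest.mark.parametrize(('build_json', 'scratch'), [
    # 'scratch' label present and true
    ({'metadata': {'labels': {'scratch': True}}}, True),
    # 'scratch' label absent: treated as a regular build
    ({'metadata': {'labels': {}}}, False),
    # no 'labels' key at all: is_scratch_build() raises KeyError
    ({'metadata': {}}, None),
])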
Example #3
    def run(self):
        """
        run the plugin
        """

        parser = df_parser(self.workflow.builder.df_path, workflow=self.workflow)
        dockerfile_labels = parser.labels
        labels = Labels(dockerfile_labels)

        component_label = labels.get_name(Labels.LABEL_TYPE_COMPONENT)

        try:
            component = dockerfile_labels[component_label]
        except KeyError:
            raise RuntimeError("missing label: {}".format(component_label))

        version_label = labels.get_name(Labels.LABEL_TYPE_VERSION)
        try:
            version = dockerfile_labels[version_label]
        except KeyError:
            raise RuntimeError('missing label: {}'.format(version_label))

        try:
            _, release = labels.get_name_and_value(Labels.LABEL_TYPE_RELEASE)
        except KeyError:
            release = None

        if release:
            if not self.append:
                self.log.debug("release set explicitly so not incrementing")
                if not is_scratch_build():
                    self.check_build_existence_for_explicit_release(component, version, release)
                return

        if self.append:
            next_release = self.get_next_release_append(component, version, release)
        elif is_scratch_build():
            metadata = get_build_json().get("metadata", {})
            next_release = metadata.get("name", "1")
        else:
            next_release = self.get_next_release_standard(component, version)

        # Always set preferred release label - other will be set if old-style
        # label is present
        release_label = labels.LABEL_NAMES[Labels.LABEL_TYPE_RELEASE][0]

        # No release labels are set so set them
        self.log.info("setting %s=%s", release_label, next_release)
        # Write the label back to the file (this is a property setter)
        dockerfile_labels[release_label] = next_release
Example #4
    def run(self):
        """
        run the plugin
        """
        if not self.workflow.data.dockerfile_images:
            self.log.info(
                "Skipping plugin, from scratch stage(s) can't add repos")
            return

        if self.include_koji_repo:
            self.add_koji_repo()
        else:
            self.log.info(
                "'include_koji_repo parameter is set to '%s', not including koji repo",
                self.include_koji_repo)

        if self.repourls and not is_scratch_build(self.workflow):
            self.validate_yum_repo_files_url()

        fetched_yum_repos = {}
        for platform in self.platforms:
            for repourl in self.repourls.get(platform, []):
                if repourl in fetched_yum_repos:
                    yum_repo = fetched_yum_repos[repourl]
                    self.yum_repos[platform].append(yum_repo)
                    continue
                yum_repo = YumRepo(repourl)
                self.log.info("fetching yum repo from '%s'", yum_repo.repourl)
                try:
                    yum_repo.fetch()
                except Exception as e:
                    msg = "Failed to fetch yum repo {repo}: {exc}".format(
                        repo=yum_repo.repourl, exc=e)
                    raise RuntimeError(msg) from e
                else:
                    self.log.info("fetched yum repo from '%s'",
                                  yum_repo.repourl)

                if self.inject_proxy:
                    if yum_repo.is_valid():
                        yum_repo.set_proxy_for_all_repos(self.inject_proxy)
                self.log.debug("saving yum repo '%s', length %d",
                               yum_repo.dst_filename, len(yum_repo.content))
                self.yum_repos[platform].append(yum_repo)
                fetched_yum_repos[repourl] = yum_repo

        if not self.yum_repos:
            return

        self._builder_ca_bundle = self.workflow.conf.builder_ca_bundle
        if self._builder_ca_bundle:
            self._ca_bundle_pem = os.path.basename(self._builder_ca_bundle)

        self.workflow.build_dir.for_each_platform(self._inject_into_repo_files)
        self.workflow.build_dir.for_each_platform(self._inject_into_dockerfile)

        for platform in self.platforms:
            for repo in self.yum_repos[platform]:
                self.log.info("injected yum repo: %s for '%s' platform",
                              repo.dst_filename, platform)
Example #5
    def run(self):
        # Only run if the build was successful
        if self.workflow.build_process_failed:
            self.log.info("Not importing failed build")
            return

        if is_scratch_build(self.workflow):
            self.log.info('scratch build, skipping plugin')
            return

        if not self.imagestream_name:
            self.log.info('no imagestream provided, skipping plugin')
            return

        self.floating_images = get_floating_images(self.workflow)
        if not self.floating_images:
            self.log.info('No floating tags to import, skipping import_image')
            return

        self.resolve_docker_image_repo()

        self.osbs = get_openshift_session(self.workflow, self.openshift_fallback)
        self.get_or_create_imagestream()

        self.osbs.import_image_tags(self.imagestream_name, self.get_trackable_tags(),
                                    self.docker_image_repo, insecure=self.insecure_registry)
Example #6
    def run(self):
        """
        Run the plugin.
        """
        if self.workflow.build_process_failed:
            self.log.info('Build failed, skipping koji tagging')
            return

        if is_scratch_build(self.workflow):
            self.log.info('scratch build, skipping plugin')
            return

        if not self.target:
            self.log.info('no koji target provided, skipping plugin')
            return

        build_id = self.workflow.exit_results.get(KojiImportPlugin.key)
        if not build_id:
            self.log.info('No koji build from %s', KojiImportPlugin.key)
            return

        session = get_koji_session(self.workflow)
        build_tag = tag_koji_build(session, build_id, self.target,
                                   poll_interval=self.poll_interval)

        return build_tag
Example #7
    def run(self):
        """Run the plugin."""
        metadatas: Dict[str, Dict[str, Any]] = {}
        wf_data = self.workflow.data

        enabled_platforms = get_platforms(wf_data)
        if not enabled_platforms:
            raise ValueError("No enabled platforms.")

        for platform in enabled_platforms:
            koji_metadata, output_files = self._get_build_metadata(platform)
            self._update_remote_host_metadata(platform, koji_metadata)

            if not is_scratch_build(self.workflow):
                for output in output_files:
                    wf_data.koji_upload_files.append({
                        "local_filename": output.filename,
                        "dest_filename": output.metadata["filename"],
                    })

            metadatas[platform] = koji_metadata

        return metadatas
Example #8
    def should_run(self):
        """
        Check if the plugin should run or skip execution.

        :return: bool, False if plugin should skip execution
        """
        if not self.is_in_orchestrator():
            self.log.warning("%s plugin set to run on worker. Skipping",
                             self.key)
            return False

        if not get_omps_config(self.workflow, None):
            self.log.info("Integration with OMPS is not configured. Skipping")
            return False

        if not has_operator_manifest(self.workflow):
            self.log.info("Not an operator build. Skipping")
            return False

        if is_scratch_build():
            self.log.info('Scratch build. Skipping')
            return False

        if is_rebuild(self.workflow):
            self.log.info('Autorebuild. Skipping')
            return False

        if is_isolated_build():
            self.log.info('Isolated build. Skipping')
            return False

        return True
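
A should_run() guard like this one is normally consulted at the top of the plugin's run() method, so the checks above decide whether the plugin does any work at all. A minimal sketch of that call site, assuming a hypothetical push_operator_manifests() worker method that is not part of the original source:

    def run(self):
        # Skip quietly; should_run() has already logged the reason.
        if not self.should_run():
            return
        # Hypothetical method doing the actual OMPS upload work.
        return self.push_operator_manifests()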
Example #9
    def __init__(self,
                 workflow,
                 poll_interval=5,
                 blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
                 repos=None,
                 koji_target=None):
        """
        :param workflow: DockerBuildWorkflow instance
        :param poll_interval: int, seconds between polling Koji while waiting
                              for task completion
        :param blocksize: int, chunk size for downloading files from koji
        :param repos: list<str>: list of yum repo URLs to be used during
                      base filesystem creation. First value will also
                      be used as install_tree. Only baseurl value is used
                      from each repo file.
        :param koji_target: str, koji target name
        """
        # call parent constructor
        super(AddFilesystemPlugin, self).__init__(workflow)

        self.poll_interval = poll_interval
        self.blocksize = blocksize
        self.repos = repos or []
        self.architectures = get_platforms(self.workflow.data)
        self.scratch = util.is_scratch_build(self.workflow)
        self.koji_target = koji_target
        self.session = None
Example #10
    def run(self) -> Optional[List[Dict[str, Any]]]:
        if (not self.workflow.conf.allow_multiple_remote_sources
                and self.multiple_remote_sources_params):
            raise ValueError('Multiple remote sources are not enabled, '
                             'use single remote source in container.yaml')

        if not (self.single_remote_source_params or self.multiple_remote_sources_params):
            self.log.info('Aborting plugin execution: missing remote source configuration')
            return None

        if not self.workflow.conf.cachito:
            raise RuntimeError('No Cachito configuration defined')

        if self._dependency_replacements and not is_scratch_build(self.workflow):
            raise ValueError('Cachito dependency replacements are only allowed for scratch builds')
        if self._dependency_replacements and self.multiple_remote_sources_params:
            raise ValueError('Cachito dependency replacements are not allowed '
                             'for multiple remote sources')

        processed_remote_sources = self.process_remote_sources()
        self.inject_remote_sources(processed_remote_sources)

        return [
            self.remote_source_to_output(remote_source)
            for remote_source in processed_remote_sources
        ]
Example #11
    def run(self):
        if is_scratch_build(self.workflow):
            # required only to make an archive for Koji
            self.log.info('scratch build, skipping plugin')
            return

        return self.workflow.build_dir.for_each_platform(self.download_image)
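
Here build_dir.for_each_platform() invokes the given method once per platform-specific build directory and collects the results. A sketch of a download_image callback compatible with that contract; the platform and path attributes are assumptions for illustration, not taken from the plugin source:

    def download_image(self, build_dir):
        # Called once per platform by for_each_platform(); 'platform'
        # and 'path' are assumed attributes of the per-platform BuildDir.
        self.log.info('downloading image for %s into %s',
                      build_dir.platform, build_dir.path)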
Example #12
    def __init__(self, tasker, workflow,
                 from_task_id=None, poll_interval=5,
                 blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
                 repos=None, architectures=None,
                 architecture=None, koji_target=None):
        """
        :param tasker: ContainerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param from_task_id: int, use existing Koji image task ID
        :param poll_interval: int, seconds between polling Koji while waiting
                              for task completion
        :param blocksize: int, chunk size for streaming files from koji
        :param repos: list<str>: list of yum repo URLs to be used during
                      base filesystem creation. First value will also
                      be used as install_tree. Only baseurl value is used
                      from each repo file.
        :param architectures: list<str>, list of arches to build on (orchestrator) - UNUSED
        :param architecture: str, arch to build on (worker)
        :param koji_target: str, koji target name
        """
        # call parent constructor
        super(AddFilesystemPlugin, self).__init__(tasker, workflow)

        self.from_task_id = from_task_id
        self.poll_interval = poll_interval
        self.blocksize = blocksize
        self.repos = repos or []
        self.architectures = get_platforms(self.workflow)
        self.architecture = architecture
        self.scratch = util.is_scratch_build(self.workflow)
        self.koji_target = koji_target
        self.session = None
Example #13
    def run(self):
        """
        run the plugin
        """
        if (self.workflow.builder.dockerfile_images.base_from_scratch and
                not self.workflow.builder.dockerfile_images):
            self.log.info("Skipping add yum repo by url: unsupported for FROM-scratch images")
            return

        if self.repourls and not is_scratch_build(self.workflow):
            self.validate_yum_repo_files_url()

        for repourl in self.repourls:
            yumrepo = YumRepo(repourl)
            self.log.info("fetching yum repo from '%s'", yumrepo.repourl)
            try:
                yumrepo.fetch()
            except Exception as e:
                msg = "Failed to fetch yum repo {repo}: {exc}".format(
                    repo=yumrepo.repourl, exc=e)
                raise RuntimeError(msg) from e
            else:
                self.log.info("fetched yum repo from '%s'", yumrepo.repourl)

            if self.inject_proxy:
                if yumrepo.is_valid():
                    yumrepo.set_proxy_for_all_repos(self.inject_proxy)
            self.workflow.files[yumrepo.dst_filename] = yumrepo.content.decode()
            self.log.debug("saving yum repo '%s', length %d", yumrepo.dst_filename,
                           len(yumrepo.content))
Example #14
    def run(self):
        if is_scratch_build(self.workflow):
            # required only to make an archive for Koji
            self.log.info('scratch build, skipping plugin')
            return

        if self.load_exported_image and len(self.workflow.exported_image_sequence) > 0:
            image_metadata = self.workflow.exported_image_sequence[-1]
            image = image_metadata.get('path')
            image_type = image_metadata.get('type')
            self.log.info('preparing to compress image %s', image)
            with open(image, 'rb') as image_stream:
                outfile = self._compress_image_stream(image_stream)
        else:
            if self.source_build:
                self.log.info('skipping, no exported source image to compress')
                return
            image = self.workflow.image
            image_type = IMAGE_TYPE_DOCKER_ARCHIVE
            self.log.info('fetching image %s from docker', image)
            with self.tasker.get_image(image) as image_stream:
                outfile = self._compress_image_stream(image_stream)
        metadata = get_exported_image_metadata(outfile, image_type)

        if self.uncompressed_size != 0:
            metadata['uncompressed_size'] = self.uncompressed_size
            savings = 1 - metadata['size'] / metadata['uncompressed_size']
            self.log.debug('uncompressed: %s, compressed: %s, ratio: %.2f %% saved',
                           human_size(metadata['uncompressed_size']),
                           human_size(metadata['size']),
                           100*savings)

        self.workflow.exported_image_sequence.append(metadata)
        self.log.info('compressed image is available as %s', outfile)
Example #15
    def __init__(self,
                 tasker,
                 workflow,
                 koji_hub=None,
                 koji_proxyuser=None,
                 koji_ssl_certs_dir=None,
                 koji_krb_principal=None,
                 koji_krb_keytab=None,
                 from_task_id=None,
                 poll_interval=5,
                 blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
                 repos=None,
                 architectures=None,
                 architecture=None,
                 koji_target=None):
        """
        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param koji_hub: str, koji hub (xmlrpc)
        :param koji_proxyuser: str, proxy user
        :param koji_ssl_certs_dir: str, path to "cert", "ca", and "serverca"
        :param koji_krb_principal: str, name of Kerberos principal
        :param koji_krb_keytab: str, Kerberos keytab
        :param from_task_id: int, use existing Koji image task ID
        :param poll_interval: int, seconds between polling Koji while waiting
                              for task completion
        :param blocksize: int, chunk size for streaming files from koji
        :param repos: list<str>: list of yum repo URLs to be used during
                      base filesystem creation. First value will also
                      be used as install_tree. Only baseurl value is used
                      from each repo file.
        :param architectures: list<str>, list of arches to build on (orchestrator) - UNUSED
        :param architecture: str, arch to build on (worker)
        :param koji_target: str, koji target name
        """
        # call parent constructor
        super(AddFilesystemPlugin, self).__init__(tasker, workflow)

        self.koji_fallback = {
            'hub_url': koji_hub,
            'auth': {
                'proxyuser': koji_proxyuser,
                'ssl_certs_dir': koji_ssl_certs_dir,
                'krb_principal': str(koji_krb_principal),
                'krb_keytab_path': str(koji_krb_keytab)
            }
        }

        self.from_task_id = from_task_id
        self.poll_interval = poll_interval
        self.blocksize = blocksize
        self.repos = repos or []
        self.architectures = get_platforms(self.workflow)
        self.is_orchestrator = True if self.architectures else False
        self.architecture = architecture
        self.scratch = util.is_scratch_build()
        self.koji_target = koji_target
Example #16
    def run(self):
        try:
            get_cachito(self.workflow)
        except KeyError:
            self.log.info(
                'Aborting plugin execution: missing Cachito configuration')
            return

        if self.workflow.source.config.remote_sources:
            raise ValueError(
                'Multiple remote sources are not supported, use single '
                'remote source in container.yaml')

        remote_source_params = self.workflow.source.config.remote_source
        if not remote_source_params:
            self.log.info(
                'Aborting plugin execution: missing remote_source configuration'
            )
            return

        if self._dependency_replacements and not is_scratch_build(
                self.workflow):
            raise ValueError(
                'Cachito dependency replacements are only allowed for scratch builds'
            )

        user = self.get_koji_user()
        self.log.info('Using user "%s" for cachito request', user)

        source_request = self.cachito_session.request_sources(
            user=user,
            dependency_replacements=self._dependency_replacements,
            **remote_source_params)
        source_request = self.cachito_session.wait_for_request(source_request)

        remote_source_json = self.source_request_to_json(source_request)
        remote_source_url = self.cachito_session.assemble_download_url(
            source_request)
        remote_source_conf_url = remote_source_json.get('configuration_files')
        remote_source_icm_url = remote_source_json.get('content_manifest')
        self.set_worker_params(source_request, remote_source_url,
                               remote_source_conf_url, remote_source_icm_url)

        dest_dir = self.workflow.source.workdir
        dest_path = self.cachito_session.download_sources(source_request,
                                                          dest_dir=dest_dir)

        return {
            # Annotations to be added to the current Build object
            'annotations': {
                'remote_source_url': remote_source_url
            },
            # JSON representation of the remote source request
            'remote_source_json': remote_source_json,
            # Local path to the remote source archive
            'remote_source_path': dest_path,
        }
Example #17
    def run(self):
        if is_scratch_build(self.workflow):
            self.log.info('scratch build, skipping plugin')
            return

        if not (self.workflow.builder.dockerfile_images.base_from_scratch
                or self.workflow.builder.dockerfile_images.custom_base_image):
            self._base_image_nvr = self.detect_parent_image_nvr(
                self.workflow.builder.dockerfile_images.base_image,
                inspect_data=self.workflow.builder.base_image_inspect,
            )
            if is_rebuild(self.workflow):
                self.ignore_isolated_autorebuilds()

        manifest_mismatches = []
        for img, local_tag in self.workflow.builder.dockerfile_images.items():
            if base_image_is_custom(img.to_str()):
                continue

            nvr = self.detect_parent_image_nvr(local_tag) if local_tag else None
            self._parent_builds[img] = (
                self.wait_for_parent_image_build(nvr) if nvr else None)
            if nvr == self._base_image_nvr:
                self._base_image_build = self._parent_builds[img]

            if self._parent_builds[img]:
                # we need the possible floating tag
                check_img = copy(local_tag)
                check_img.tag = img.tag
                try:
                    self.check_manifest_digest(check_img,
                                               self._parent_builds[img])
                except ValueError as exc:
                    manifest_mismatches.append(exc)
            else:
                err_msg = (
                    'Could not get koji build info for parent image {}. '
                    'Was this image built in OSBS?'.format(img.to_str()))
                if get_skip_koji_check_for_base_image(self.workflow,
                                                      fallback=False):
                    self.log.warning(err_msg)
                else:
                    self.log.error(err_msg)
                    raise RuntimeError(err_msg)

        if manifest_mismatches:
            mismatch_msg = (
                'Error while comparing parent images manifest digests in koji with '
                'related values from registries: %s')
            if get_fail_on_digest_mismatch(self.workflow, fallback=True):
                self.log.error(mismatch_msg, manifest_mismatches)
                raise RuntimeError(mismatch_msg % manifest_mismatches)

            self.log.warning(mismatch_msg, manifest_mismatches)
        return self.make_result()
Example #18
    def run(self):
        if is_scratch_build(self.workflow):
            self.log.info('scratch build, skipping plugin')
            return

        df_images = self.workflow.data.dockerfile_images
        if not (df_images.base_from_scratch or df_images.custom_base_image):
            self._base_image_nvr = self.detect_parent_image_nvr(
                df_images.base_image,
                # Inspect any platform: the N-V-R labels should be equal for all platforms
                inspect_data=self.workflow.imageutil.base_image_inspect(),
            )

        manifest_mismatches = []
        for img, local_tag in df_images.items():
            img_str = img.to_str()
            if base_image_is_custom(img_str):
                continue

            nvr = self.detect_parent_image_nvr(local_tag) if local_tag else None
            parent_build_info = (
                self.wait_for_parent_image_build(nvr) if nvr else None)
            self._parent_builds[img_str] = parent_build_info

            if nvr == self._base_image_nvr:
                self._base_image_build = parent_build_info

            if parent_build_info:
                # we need the possible floating tag
                check_img = copy(local_tag)
                check_img.tag = img.tag
                try:
                    self.check_manifest_digest(check_img, parent_build_info)
                except ValueError as exc:
                    manifest_mismatches.append(exc)
            else:
                err_msg = (
                    f'Could not get koji build info for parent image {img_str}. '
                    f'Was this image built in OSBS?')
                if self.workflow.conf.skip_koji_check_for_base_image:
                    self.log.warning(err_msg)
                else:
                    self.log.error(err_msg)
                    raise RuntimeError(err_msg)

        if manifest_mismatches:
            mismatch_msg = (
                'Error while comparing parent images manifest digests in koji with '
                'related values from registries: %s')
            if self.workflow.conf.fail_on_digest_mismatch:
                self.log.error(mismatch_msg, manifest_mismatches)
                raise RuntimeError(mismatch_msg % manifest_mismatches)

            self.log.warning(mismatch_msg, manifest_mismatches)
        return self.make_result()
Example #19
    def run(self):
        """
        run the plugin
        """
        if self.koji_target:
            koji_session = get_koji_session(self.workflow, NO_FALLBACK)
            self.log.info("Checking koji target for platforms")
            event_id = koji_session.getLastEvent()['id']
            target_info = koji_session.getBuildTarget(self.koji_target,
                                                      event=event_id)
            build_tag = target_info['build_tag']
            koji_build_conf = koji_session.getBuildConfig(build_tag,
                                                          event=event_id)
            koji_platforms = koji_build_conf['arches']
            if not koji_platforms:
                self.log.info("No platforms found in koji target")
                return None
            platforms = koji_platforms.split()
            self.log.info("Koji platforms are %s", sorted(platforms))

            if is_scratch_build() or is_isolated_build():
                override_platforms = get_orchestrator_platforms(self.workflow)
                if override_platforms and set(override_platforms) != set(
                        platforms):
                    sort_platforms = sorted(override_platforms)
                    self.log.info("Received user specified platforms %s",
                                  sort_platforms)
                    self.log.info("Using them instead of koji platforms")
                    # platforms from user params do not match platforms from koji target
                    # that almost certainly means they were overridden and should be used
                    return set(override_platforms)
        else:
            platforms = get_orchestrator_platforms(self.workflow)
            self.log.info("No koji platforms. User specified platforms are %s",
                          sorted(platforms))

        if not platforms:
            raise RuntimeError(
                "Cannot determine platforms; no koji target or platform list")

        # Filter platforms based on clusters
        enabled_platforms = []
        for p in platforms:
            if self.reactor_config.get_enabled_clusters_for_platform(p):
                enabled_platforms.append(p)
            else:
                self.log.warning(
                    "No cluster found for platform '%s' in reactor config map, skipping",
                    p)

        final_platforms = get_platforms_in_limits(self.workflow,
                                                  enabled_platforms)

        self.log.info("platforms in limits : %s", final_platforms)
        return final_platforms
Example #20
    def run(self):
        """
        Run the plugin.
        """

        # get the session and token information in case we need to refund a failed build
        self.session = get_koji_session(self.workflow.conf)

        server_dir = self.get_server_dir()
        koji_metadata = self.combine_metadata_fragments()

        if is_scratch_build(self.workflow):
            self.upload_scratch_metadata(koji_metadata, server_dir)
            return

        # for all builds which have koji task
        if self.koji_task_id:
            task_info = self.session.getTaskInfo(self.koji_task_id)
            task_state = koji.TASK_STATES[task_info['state']]
            if task_state != 'OPEN':
                self.log.error(
                    "Koji task is not in Open state, but in %s, not importing build",
                    task_state)
                return

        self._upload_output_files(server_dir)

        build_token = self.workflow.data.reserved_token
        build_id = self.workflow.data.reserved_build_id

        if build_id is not None and build_token is not None:
            koji_metadata['build']['build_id'] = build_id

        try:
            if build_token:
                build_info = self.session.CGImport(koji_metadata,
                                                   server_dir,
                                                   token=build_token)
            else:
                build_info = self.session.CGImport(koji_metadata, server_dir)

        except Exception:
            self.log.debug("metadata: %r", koji_metadata)
            raise

        # Older versions of CGImport do not return a value.
        build_id = build_info.get("id") if build_info else None

        self.log.debug("Build information: %s",
                       json.dumps(build_info, sort_keys=True, indent=4))

        return build_id
Example #21
    def allow_inheritance(self):
        """Returns boolean if composes can be inherited"""
        if not self.workflow.source.config.inherit:
            return False
        self.log.info("Inheritance requested in config file")

        if is_scratch_build() or is_isolated_build():
            self.log.warning(
                "Inheritance is not allowed for scratch or isolated builds. "
                "Skipping inheritance.")
            return False

        return True
Example #22
    def next_release_general(
        self, component: str, version: str, release: Optional[str] = None
    ) -> str:
        """Get next release for build."""
        if is_scratch_build(self.workflow):
            # no need to append for scratch build
            next_release = self.workflow.pipeline_run_name
        elif self.append:
            next_release = self.get_next_release_append(component, version, release)
        else:
            next_release = self.get_next_release_standard(component, version)

        return next_release
Example #23
    def __init__(self, tasker, workflow, koji_hub=None,
                 koji_proxyuser=None, koji_ssl_certs_dir=None,
                 koji_krb_principal=None, koji_krb_keytab=None,
                 from_task_id=None, poll_interval=5,
                 blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
                 repos=None, architectures=None,
                 architecture=None, koji_target=None):
        """
        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param koji_hub: str, koji hub (xmlrpc)
        :param koji_proxyuser: str, proxy user
        :param koji_ssl_certs_dir: str, path to "cert", "ca", and "serverca"
        :param koji_krb_principal: str, name of Kerberos principal
        :param koji_krb_keytab: str, Kerberos keytab
        :param from_task_id: int, use existing Koji image task ID
        :param poll_interval: int, seconds between polling Koji while waiting
                              for task completion
        :param blocksize: int, chunk size for streaming files from koji
        :param repos: list<str>: list of yum repo URLs to be used during
                      base filesystem creation. First value will also
                      be used as install_tree. Only baseurl value is used
                      from each repo file.
        :param architectures: list<str>, list of arches to build on (orchestrator) - UNUSED
        :param architecture: str, arch to build on (worker)
        :param koji_target: str, koji target name
        """
        # call parent constructor
        super(AddFilesystemPlugin, self).__init__(tasker, workflow)

        self.koji_fallback = {
            'hub_url': koji_hub,
            'auth': {
                'proxyuser': koji_proxyuser,
                'ssl_certs_dir': koji_ssl_certs_dir,
                'krb_principal': str(koji_krb_principal),
                'krb_keytab_path': str(koji_krb_keytab)
            }
        }

        self.from_task_id = from_task_id
        self.poll_interval = poll_interval
        self.blocksize = blocksize
        self.repos = repos or []
        self.architectures = get_platforms(self.workflow)
        self.is_orchestrator = True if self.architectures else False
        self.architecture = architecture
        self.scratch = util.is_scratch_build()
        self.koji_target = koji_target
Example #24
    def allow_inheritance(self):
        """Returns boolean if composes can be inherited"""
        if not self.workflow.source.config.inherit:
            return False
        self.log.info("Inheritance requested in container.yaml file")

        if is_scratch_build(self.workflow) or is_isolated_build(self.workflow):
            msg = ("'inherit: true' in the compose section of container.yaml "
                   "is not allowed for scratch or isolated builds. "
                   "Skipping inheritance.")
            self.log.warning(msg)
            self.log.user_warning(message=msg)
            return False

        return True
Example #25
    def run(self):
        """
        run the plugin
        """

        parser = df_parser(self.workflow.builder.df_path,
                           workflow=self.workflow)
        dockerfile_labels = parser.labels
        labels = Labels(dockerfile_labels)

        try:
            _, release = labels.get_name_and_value(Labels.LABEL_TYPE_RELEASE)
            if not self.append:
                self.log.debug("release set explicitly so not incrementing")
                return
        except KeyError:
            release = None

        component_label = labels.get_name(Labels.LABEL_TYPE_COMPONENT)

        try:
            component = dockerfile_labels[component_label]
        except KeyError:
            raise RuntimeError("missing label: {}".format(component_label))

        version_label = labels.get_name(Labels.LABEL_TYPE_VERSION)
        try:
            version = dockerfile_labels[version_label]
        except KeyError:
            raise RuntimeError('missing label: {}'.format(version_label))

        if self.append:
            next_release = self.get_next_release_append(
                component, version, release)
        elif is_scratch_build():
            metadata = get_build_json().get("metadata", {})
            next_release = metadata.get("name", "1")
        else:
            next_release = self.get_next_release_standard(component, version)

        # Always set preferred release label - other will be set if old-style
        # label is present
        release_label = labels.LABEL_NAMES[Labels.LABEL_TYPE_RELEASE][0]

        # No release labels are set so set them
        self.log.info("setting %s=%s", release_label, next_release)
        # Write the label back to the file (this is a property setter)
        dockerfile_labels[release_label] = next_release
Example #26
    def run(self):
        """
        run the plugin
        """
        if self.koji_target:
            koji_session = get_koji_session(self.workflow, NO_FALLBACK)
            self.log.info("Checking koji target for platforms")
            event_id = koji_session.getLastEvent()['id']
            target_info = koji_session.getBuildTarget(self.koji_target, event=event_id)
            build_tag = target_info['build_tag']
            koji_build_conf = koji_session.getBuildConfig(build_tag, event=event_id)
            koji_platforms = koji_build_conf['arches']
            if not koji_platforms:
                self.log.info("No platforms found in koji target")
                return None
            platforms = koji_platforms.split()
            self.log.info("Koji platforms are %s", sorted(platforms))

            if is_scratch_build() or is_isolated_build():
                override_platforms = get_orchestrator_platforms(self.workflow)
                if override_platforms and set(override_platforms) != set(platforms):
                    sort_platforms = sorted(override_platforms)
                    self.log.info("Received user specified platforms %s", sort_platforms)
                    self.log.info("Using them instead of koji platforms")
                    # platforms from user params do not match platforms from koji target
                    # that almost certainly means they were overridden and should be used
                    return set(override_platforms)
        else:
            platforms = get_orchestrator_platforms(self.workflow)
            self.log.info("No koji platforms. User specified platforms are %s", sorted(platforms))

        if not platforms:
            raise RuntimeError("Cannot determine platforms; no koji target or platform list")

        # Filter platforms based on clusters
        enabled_platforms = []
        for p in platforms:
            if self.reactor_config.get_enabled_clusters_for_platform(p):
                enabled_platforms.append(p)
            else:
                self.log.warning(
                    "No cluster found for platform '%s' in reactor config map, skipping", p)

        final_platforms = get_platforms_in_limits(self.workflow, enabled_platforms)

        self.log.info("platforms in limits : %s", final_platforms)
        return final_platforms
Example #27
    def __init__(self,
                 tasker,
                 workflow,
                 koji_hub,
                 koji_proxyuser=None,
                 koji_ssl_certs_dir=None,
                 koji_krb_principal=None,
                 koji_krb_keytab=None,
                 from_task_id=None,
                 poll_interval=5,
                 blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
                 repos=None):
        """
        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param koji_hub: str, koji hub (xmlrpc)
        :param koji_proxyuser: str, proxy user
        :param koji_ssl_certs_dir: str, path to "cert", "ca", and "serverca"
        :param koji_krb_principal: str, name of Kerberos principal
        :param koji_krb_keytab: str, Kerberos keytab
        :param from_task_id: int, use existing Koji image task ID
        :param poll_interval: int, seconds between polling Koji while waiting
                              for task completion
        :param blocksize: int, chunk size for streaming files from koji
        :param repos: list<str>: list of yum repo URLs to be used during
                      base filesystem creation. First value will also
                      be used as install_tree. Only baseurl value is used
                      from each repo file.
        """
        # call parent constructor
        super(AddFilesystemPlugin, self).__init__(tasker, workflow)
        self.koji_hub = koji_hub
        self.koji_auth_info = {
            'proxyuser': koji_proxyuser,
            'ssl_certs_dir': koji_ssl_certs_dir,
            'krb_principal': koji_krb_principal,
            'krb_keytab': koji_krb_keytab,
        }
        self.from_task_id = from_task_id
        self.poll_interval = poll_interval
        self.blocksize = blocksize
        self.repos = repos or []
        self.scratch = util.is_scratch_build()
Example #28
    def next_release_general(self, component, version, release, release_label,
                             dockerfile_labels):
        """
        get next release for build and set it in dockerfile
        """
        if is_scratch_build(self.workflow):
            # no need to append for scratch build
            metadata = get_build_json().get("metadata", {})
            next_release = metadata.get("name", "1")
        elif self.append:
            next_release = self.get_next_release_append(
                component, version, release)
        else:
            next_release = self.get_next_release_standard(component, version)

        # No release labels are set so set them
        self.log.info("setting %s=%s", release_label, next_release)
        # Write the label back to the file (this is a property setter)
        dockerfile_labels[release_label] = next_release
Example #29
    def should_run(self):
        """
        Check if the plugin should run or skip execution.

        :return: bool, False if plugin should skip execution
        """
        if self.is_in_orchestrator():
            self.log.warning("%s plugin set to run on orchestrator. Skipping", self.key)
            return False
        if self.operator_manifests_extract_platform != self.platform:
            self.log.info("Only platform [%s] will upload operators metadata. Skipping",
                          self.operator_manifests_extract_platform)
            return False
        if is_scratch_build():
            self.log.info("Scratch build. Skipping")
            return False
        if not self.has_operator_manifest():
            self.log.info("Operator manifests label not set in Dockerfile. Skipping")
            return False
        return True
Example #30
    def run(self):
        """
        Run the plugin.
        """
        # Only run if the build was successful
        if self.workflow.build_process_failed:
            self.log.info("Not importing failed build to koji")
            return

        self.session = get_koji_session(self.workflow, self.koji_fallback)

        server_dir = get_koji_upload_dir(self.workflow)

        koji_metadata, output_files = self.combine_metadata_fragments()

        if is_scratch_build():
            self.upload_scratch_metadata(koji_metadata, server_dir, self.session)
            return

        try:
            for output in output_files:
                if output.file:
                    self.upload_file(self.session, output, server_dir)
        finally:
            for output in output_files:
                if output.file:
                    output.file.close()

        try:
            build_info = self.session.CGImport(koji_metadata, server_dir)
        except Exception:
            self.log.debug("metadata: %r", koji_metadata)
            raise

        # Older versions of CGImport do not return a value.
        build_id = build_info.get("id") if build_info else None

        self.log.debug("Build information: %s",
                       json.dumps(build_info, sort_keys=True, indent=4))

        return build_id
Example #31
    def run(self):
        """
        run the plugin
        """
        if is_scratch_build(self.workflow):
            self.log.info('scratch build, skipping plugin')
            return False
        if is_isolated_build(self.workflow):
            self.log.info('isolated build, skipping plugin')
            return False

        if self.workflow.builder.dockerfile_images.base_from_scratch:
            self.log.info(
                "Skipping check and set rebuild: unsupported for FROM-scratch images"
            )
            return False
        if self.workflow.builder.dockerfile_images.custom_base_image:
            self.log.info(
                "Skipping check and set rebuild: unsupported for custom base images"
            )
            return False

        metadata = get_build_json().get("metadata", {})
        self.build_labels = metadata.get("labels", {})
        buildconfig = self.build_labels["buildconfig"]
        is_rebuild = self.build_labels.get(self.label_key) == self.label_value
        self.log.info("This is a rebuild? %s", is_rebuild)

        if not is_rebuild:
            # Update the BuildConfig metadata so the next Build
            # instantiated from it is detected as being an automated
            # rebuild
            osbs = get_openshift_session(self.workflow,
                                         self.openshift_fallback)
            new_labels = {self.label_key: self.label_value}
            osbs.update_labels_on_build_config(buildconfig, new_labels)
        else:
            self.pull_latest_commit_if_configured()

        return is_rebuild
Example #32
    def run(self):
        """
        Run the plugin.
        """
        # Only run if the build was successful
        if self.workflow.build_process_failed:
            self.log.info("Not promoting failed build to koji")
            return

        koji_metadata, output_files = self.get_metadata()

        if not is_scratch_build():
            try:
                session = get_koji_session(self.workflow, self.koji_fallback)
                for output in output_files:
                    if output.file:
                        self.upload_file(session, output, self.koji_upload_dir)
            finally:
                for output in output_files:
                    if output.file:
                        output.file.close()

        md_fragment = "{}-md".format(get_build_json()['metadata']['name'])
        md_fragment_key = 'metadata.json'
        cm_data = {md_fragment_key: koji_metadata}
        annotations = {
            "metadata_fragment": "configmap/" + md_fragment,
            "metadata_fragment_key": md_fragment_key
        }

        try:
            self.osbs.create_config_map(md_fragment, cm_data)
        except OsbsException:
            self.log.debug("metadata: %r", koji_metadata)
            self.log.debug("annotations: %r", annotations)
            raise

        return annotations
Example #33
    def run(self):
        """
        Run the plugin.
        """
        # Only run if the build was successful
        if self.workflow.build_process_failed:
            self.log.info("Not promoting failed build to koji")
            return

        koji_metadata, output_files = self.get_metadata()

        if not is_scratch_build():
            try:
                session = get_koji_session(self.workflow, self.koji_fallback)
                for output in output_files:
                    if output.file:
                        self.upload_file(session, output, self.koji_upload_dir)
            finally:
                for output in output_files:
                    if output.file:
                        output.file.close()

        md_fragment = "{}-md".format(get_build_json()['metadata']['name'])
        md_fragment_key = 'metadata.json'
        cm_data = {md_fragment_key: koji_metadata}
        annotations = {
            "metadata_fragment": "configmap/" + md_fragment,
            "metadata_fragment_key": md_fragment_key
        }

        try:
            self.osbs.create_config_map(md_fragment, cm_data)
        except OsbsException:
            self.log.debug("metadata: %r", koji_metadata)
            self.log.debug("annotations: %r", annotations)
            raise

        return annotations
Example #34
    def should_run(self):
        """
        Check if the plugin should run or skip execution.

        :return: bool, False if plugin should skip execution
        """
        if self.is_in_orchestrator():
            self.log.warning("%s plugin set to run on orchestrator. Skipping", self.key)
            return False
        if self.operator_manifests_extract_platform != self.platform:
            self.log.info("Only platform [%s] will upload operators metadata. Skipping",
                          self.operator_manifests_extract_platform)
            return False
        if is_scratch_build():
            self.log.info("Scratch build. Skipping")
            return False
        if not (
            has_operator_bundle_manifest(self.workflow) or
            has_operator_appregistry_manifest(self.workflow)
        ):
            self.log.info("Operator manifests label not set in Dockerfile. Skipping")
            return False
        return True
Example #35
    def run(self):
        """
        run the plugin
        """
        koji_session = get_koji_session(self.workflow, NO_FALLBACK)
        self.log.info("Checking koji target for platforms")
        event_id = koji_session.getLastEvent()['id']
        target_info = koji_session.getBuildTarget(self.koji_target, event=event_id)
        build_tag = target_info['build_tag']
        koji_build_conf = koji_session.getBuildConfig(build_tag, event=event_id)
        koji_platforms = koji_build_conf['arches']
        if not koji_platforms:
            self.log.info("No platforms found in koji target")
            return None
        platforms = koji_platforms.split()

        if is_scratch_build() or is_isolated_build():
            override_platforms = get_orchestrator_platforms(self.workflow)
            if override_platforms and set(override_platforms) != set(platforms):
                # platforms from user params do not match platforms from koji target
                # that almost certainly means they were overridden and should be used
                return set(override_platforms)

        return get_platforms_in_limits(self.workflow, platforms)
Example #36
    def __init__(self, tasker, workflow,
                 odcs_url=None,
                 odcs_insecure=False,
                 odcs_openidc_secret_path=None,
                 odcs_ssl_secret_path=None,
                 koji_target=None,
                 koji_hub=None,
                 koji_ssl_certs_dir=None,
                 signing_intent=None,
                 compose_ids=tuple(),
                 repourls=None,
                 minimum_time_to_expire=MINIMUM_TIME_TO_EXPIRE,
                 ):
        """
        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param odcs_url: URL of ODCS (On Demand Compose Service)
        :param odcs_insecure: If True, don't check SSL certificates for `odcs_url`
        :param odcs_openidc_secret_path: directory to look in for a `token` file
        :param odcs_ssl_secret_path: directory to look in for `cert` file - a PEM file
                                     containing both cert and key
        :param koji_target: str, contains build tag to be used when requesting compose from "tag"
        :param koji_hub: str, koji hub (xmlrpc), required if koji_target is used
        :param koji_ssl_certs_dir: str, path to "cert", and "serverca"
                                   used when Koji's identity certificate is not trusted
        :param signing_intent: override the signing intent from git repo configuration
        :param compose_ids: use the given compose_ids instead of requesting a new one
        :param repourls: list of str, URLs to the repo files
        :param minimum_time_to_expire: int, used in deciding when to extend compose's time
                                       to expire in seconds
        """
        super(ResolveComposesPlugin, self).__init__(tasker, workflow)

        if signing_intent and compose_ids:
            raise ValueError('signing_intent and compose_ids cannot be used at the same time')

        self.signing_intent = signing_intent
        self.compose_ids = compose_ids
        self.odcs_fallback = {
            'api_url': odcs_url,
            'insecure': odcs_insecure,
            'auth': {
                'ssl_certs_dir': odcs_ssl_secret_path,
                'openidc_dir': odcs_openidc_secret_path
            }
        }

        self.koji_target = koji_target
        self.koji_fallback = {
            'hub_url': koji_hub,
            'auth': {
                'ssl_certs_dir': koji_ssl_certs_dir
            }
        }
        if koji_target:
            if not get_koji(self.workflow, self.koji_fallback)['hub_url']:
                raise ValueError('koji_hub is required when koji_target is used')

        self.minimum_time_to_expire = minimum_time_to_expire

        self._koji_session = None
        self._odcs_client = None
        self.odcs_config = None
        self.compose_config = None
        self.composes_info = None
        self._parent_signing_intent = None
        self.repourls = repourls or []
        self.inherit = self.workflow.source.config.inherit
        self.plugin_result = self.workflow.prebuild_results.get(PLUGIN_KOJI_PARENT_KEY)
        self.allow_inheritance = self.inherit and not (is_scratch_build() or is_isolated_build())
        self.all_compose_ids = list(self.compose_ids)
Example #37
    def set_group_manifest_info(self, extra, worker_metadatas):
        version_release = None
        primary_images = get_primary_images(self.workflow)
        floating_images = get_floating_images(self.workflow)
        unique_images = get_unique_images(self.workflow)
        if primary_images:
            version_release = primary_images[0].tag

        if is_scratch_build(self.workflow):
            tags = [image.tag for image in self.workflow.tag_conf.images]
            version_release = tags[0]
        else:
            assert version_release is not None, 'Unable to find version-release image'
            tags = [image.tag for image in primary_images]

        floating_tags = [image.tag for image in floating_images]
        unique_tags = [image.tag for image in unique_images]

        manifest_data = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY, {})
        if manifest_data and is_manifest_list(manifest_data.get("media_type")):
            manifest_digest = manifest_data.get("manifest_digest")
            index = {}
            index['tags'] = tags
            index['floating_tags'] = floating_tags
            index['unique_tags'] = unique_tags
            build_image = get_unique_images(self.workflow)[0]
            repo = ImageName.parse(build_image).to_str(registry=False, tag=False)
            # group_manifests added the registry, so this should be valid
            registries = self.workflow.push_conf.all_registries

            digest_version = get_manifest_media_version(manifest_digest)
            digest = manifest_digest.default

            for registry in registries:
                pullspec = "{0}/{1}@{2}".format(registry.uri, repo, digest)
                index['pull'] = [pullspec]
                pullspec = "{0}/{1}:{2}".format(registry.uri, repo,
                                                version_release)
                index['pull'].append(pullspec)

                # Store each digest with according media type
                index['digests'] = {}
                media_type = get_manifest_media_type(digest_version)
                index['digests'][media_type] = digest

                break
            extra['image']['index'] = index
        # group_manifests returns None if it didn't run, {} if group=False
        else:
            for platform in worker_metadatas:
                if platform == "x86_64":
                    for instance in worker_metadatas[platform]['output']:
                        if instance['type'] == 'docker-image':
                            # koji_upload, running in the worker, doesn't have the full tags
                            # so set them here
                            instance['extra']['docker']['tags'] = tags
                            instance['extra']['docker']['floating_tags'] = floating_tags
                            instance['extra']['docker']['unique_tags'] = unique_tags
                            repositories = []
                            for pullspec in instance['extra']['docker']['repositories']:
                                if '@' not in pullspec:
                                    image = ImageName.parse(pullspec)
                                    image.tag = version_release
                                    pullspec = image.to_str()

                                repositories.append(pullspec)

                            instance['extra']['docker']['repositories'] = repositories
                            self.log.debug("reset tags to so that docker is %s",
                                           instance['extra']['docker'])
                            annotations = get_worker_build_info(self.workflow, platform).\
                                build.get_annotations()

                            digests = {}
                            if 'digests' in annotations:
                                digests = get_digests_map_from_annotations(annotations['digests'])
                                instance['extra']['docker']['digests'] = digests
Example #38
    def run(self):
        """
        Run the plugin.
        """

        # get the session and token information in case we need to refund a failed build
        self.session = get_koji_session(self.workflow)
        build_token = self.workflow.reserved_token
        build_id = self.workflow.reserved_build_id

        # Only run if the build was successful
        if self.workflow.build_process_failed:
            self.log.info("Not importing %s build to koji",
                          "canceled" if self.workflow.build_canceled else "failed")
            if self.reserve_build and build_token is not None:
                state = koji.BUILD_STATES['FAILED']
                if self.workflow.build_canceled:
                    state = koji.BUILD_STATES['CANCELED']
                self.session.CGRefundBuild(PROG, build_id, build_token, state)
            return

        server_dir = self.get_server_dir()

        koji_metadata, output_files = self.combine_metadata_fragments()

        if is_scratch_build(self.workflow):
            self.upload_scratch_metadata(koji_metadata, server_dir, self.session)
            return

        # for all builds which have a koji task, except for a rebuild without delegate
        # enabled, because such a rebuild reuses the original task, which will no longer be OPEN
        if self.koji_task_id and (not self.rebuild or (self.rebuild and self.delegate_enabled)):
            task_info = self.session.getTaskInfo(self.koji_task_id)
            task_state = koji.TASK_STATES[task_info['state']]
            if task_state != 'OPEN':
                self.log.error("Koji task is not in Open state, but in %s, not importing build",
                               task_state)

                if self.reserve_build and build_token is not None:
                    state = koji.BUILD_STATES['FAILED']
                    self.session.CGRefundBuild(PROG, build_id, build_token, state)
                return

        try:
            for output in output_files:
                if output.file:
                    self.upload_file(self.session, output, server_dir)
        finally:
            for output in output_files:
                if output.file:
                    output.file.close()

        if build_id is not None and build_token is not None:
            koji_metadata['build']['build_id'] = build_id

        try:
            if build_token:
                build_info = self.session.CGImport(koji_metadata, server_dir, token=build_token)
            else:
                build_info = self.session.CGImport(koji_metadata, server_dir)

        except Exception:
            self.log.debug("metadata: %r", koji_metadata)
            raise

        # Older versions of CGImport do not return a value.
        build_id = build_info.get("id") if build_info else None

        self.log.debug("Build information: %s",
                       json.dumps(build_info, sort_keys=True, indent=4))

        return build_id
Example #39
    def set_group_manifest_info(self, extra, worker_metadatas):
        version_release = None
        primary_images = get_primary_images(self.workflow)
        if primary_images:
            version_release = primary_images[0].tag

        if is_scratch_build():
            tags = [image.tag for image in self.workflow.tag_conf.images]
            version_release = tags[0]
        else:
            assert version_release is not None, 'Unable to find version-release image'
            tags = [image.tag for image in primary_images]

        manifest_list_digests = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY)
        if manifest_list_digests:
            index = {}
            index['tags'] = tags
            repositories = self.workflow.build_result.annotations['repositories']['unique']
            repo = ImageName.parse(repositories[0]).to_str(registry=False, tag=False)
            # group_manifests added the registry, so this should be valid
            registries = self.workflow.push_conf.pulp_registries
            if not registries:
                registries = self.workflow.push_conf.all_registries
            for registry in registries:
                manifest_list_digest = manifest_list_digests[repo]
                pullspec = "{0}/{1}@{2}".format(registry.uri, repo, manifest_list_digest.default)
                index['pull'] = [pullspec]
                pullspec = "{0}/{1}:{2}".format(registry.uri, repo,
                                                version_release)
                index['pull'].append(pullspec)

                # Store each digest with according media type
                index['digests'] = {}
                for version, digest in manifest_list_digest.items():
                    if digest:
                        media_type = get_manifest_media_type(version)
                        index['digests'][media_type] = digest
                break
            extra['image']['index'] = index
        # group_manifests returns None if it didn't run, {} if group=False
        else:
            for platform in worker_metadatas:
                if platform == "x86_64":
                    for instance in worker_metadatas[platform]['output']:
                        if instance['type'] == 'docker-image':
                            # koji_upload, running in the worker, doesn't have the full tags
                            # so set them here
                            instance['extra']['docker']['tags'] = tags
                            repositories = []
                            for pullspec in instance['extra']['docker']['repositories']:
                                if '@' not in pullspec:
                                    image = ImageName.parse(pullspec)
                                    image.tag = version_release
                                    pullspec = image.to_str()

                                repositories.append(pullspec)

                            instance['extra']['docker']['repositories'] = repositories
                            self.log.debug("reset tags to so that docker is %s",
                                           instance['extra']['docker'])
                            annotations = get_worker_build_info(self.workflow, platform).\
                                build.get_annotations()
                            digests = {}
                            if 'digests' in annotations:
                                digests = get_digests_map_from_annotations(annotations['digests'])
                                instance['extra']['docker']['digests'] = digests