Example #1
    def delete_v1_layers(self, repo_prefix="redhat-"):
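        # Remove any v1 image layers that worker builds pushed to Pulp: read
        # each worker's 'v1-image-id' build annotation and, when present,
        # delete that image from every Pulp repo derived from the configured
        # image names and repo_prefix.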
        annotations = self.workflow.build_result.annotations
        if not annotations:
            # No worker builds created
            return

        worker_builds = annotations['worker-builds']

        for platform in worker_builds:
            build_info = get_worker_build_info(self.workflow, platform)
            annotations = build_info.build.get_annotations() or {}
            v1_image_id = annotations.get('v1-image-id')
            if v1_image_id:
                image_names = self.workflow.tag_conf.images
                self.pulp_handler.create_dockpulp()
                if not repo_prefix:
                    repo_prefix = ''
                pulp_repos = set([
                    '%s%s' % (repo_prefix, image.pulp_repo)
                    for image in image_names
                ])
                for repo_id in pulp_repos:
                    self.log.info("removing %s from repo %s", v1_image_id,
                                  repo_id)
                    self.pulp_handler.remove_image(repo_id, v1_image_id)
Example #2
    def run(self):
        """
        Run the plugin.
        """
        build_result = self.workflow.build_result

        worker_builds = build_result.annotations['worker-builds']

        for platform, build_annotations in worker_builds.items():
            try:
                cm_key, _ = get_platform_config(platform, build_annotations)
            except BadConfigMapError:
                continue

            build_info = get_worker_build_info(self.workflow, platform)
            osbs = build_info.osbs

            try:
                osbs.delete_config_map(cm_key)
                self.log.debug("ConfigMap %s on platform %s deleted", cm_key,
                               platform)
            except OsbsResponseException as ex:
                self.log.warning(
                    "Failed to delete ConfigMap %s on platform %s: %s", cm_key,
                    platform, ex)
    def run(self):
        """
        Run the plugin.
        """
        build_result = self.workflow.build_result

        if not build_result.annotations:
            self.log.info("No build annotations found, skipping plugin")
            return

        worker_builds = build_result.annotations.get('worker-builds', {})

        for platform, build_annotations in worker_builds.items():
            try:
                if ('metadata_fragment' not in build_annotations
                        or 'metadata_fragment_key' not in build_annotations):
                    continue

                cm_key, _ = get_platform_config(platform, build_annotations)
            except BadConfigMapError:
                continue

            build_info = get_worker_build_info(self.workflow, platform)
            osbs = build_info.osbs

            try:
                osbs.delete_config_map(cm_key)
                self.log.debug("ConfigMap %s on platform %s deleted", cm_key,
                               platform)
            except OsbsResponseException as ex:
                self.log.warning(
                    "Failed to delete ConfigMap %s on platform %s: %s", cm_key,
                    platform, ex)
Example #4
    def run(self):
        """
        Run the plugin.
        """

        worker_builds = self.workflow.build_result.annotations['worker-builds']
        has_v1_image_id = None
        repo_tags = {}

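        # Only one worker build is expected to produce a v1 image; find it,
        # tag it via set_v1_tags and return the resulting repo tags.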
        for platform in worker_builds:
            build_info = get_worker_build_info(self.workflow, platform)
            annotations = build_info.build.get_annotations()
            v1_image_id = annotations.get('v1-image-id')
            if v1_image_id:
                if has_v1_image_id:
                    msg = "two platforms with v1-image-ids: {0} and {1}".format(
                        platform, has_v1_image_id)
                    raise RuntimeError(msg)
                has_v1_image_id = platform
                self.log.info("tagging v1-image-id %s for platform %s",
                              v1_image_id, platform)
                ret_val = self.set_v1_tags(v1_image_id)
                if ret_val:
                    repo_tags = ret_val
        return repo_tags
    def set_media_types(self, extra, worker_metadatas):
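        # Take the 'media-types' annotation from the first worker build that
        # reports one, merge in any media types from the pulp pull plugin,
        # and store the de-duplicated, sorted list under
        # extra['image']['media_types'].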
        media_types = []
        for platform in worker_metadatas:
            annotations = get_worker_build_info(self.workflow, platform).build.get_annotations()
            if annotations.get('media-types'):
                media_types = json.loads(annotations['media-types'])
                break

        # Append media_types from pulp pull
        pulp_pull_results = self.workflow.exit_results.get(PLUGIN_PULP_PULL_KEY)
        if pulp_pull_results:
            media_types += pulp_pull_results

        if media_types:
            extra['image']['media_types'] = sorted(list(set(media_types)))
Example #6
    def set_media_types(self, extra, worker_metadatas):
        media_types = []
        for platform in worker_metadatas:
            annotations = get_worker_build_info(self.workflow, platform).build.get_annotations()
            if annotations.get('media-types'):
                media_types = json.loads(annotations['media-types'])
                break

        # Append media_types from pulp pull
        pulp_pull_results = self.workflow.exit_results.get(PLUGIN_PULP_PULL_KEY)
        if pulp_pull_results:
            media_types += pulp_pull_results[1]

        if media_types:
            extra['image']['media_types'] = sorted(list(set(media_types)))
Example #7
    def set_help(self, extra, worker_metadatas):
        all_annotations = [get_worker_build_info(self.workflow, platform).build.get_annotations()
                           for platform in worker_metadatas]
        help_known = ['help_file' in annotations for annotations in all_annotations]
        # Only set the 'help' key when any 'help_file' annotation is set
        if any(help_known):
            # See if any are not None
            for known, annotations in zip(help_known, all_annotations):
                if known:
                    help_file = json.loads(annotations['help_file'])
                    if help_file is not None:
                        extra['image']['help'] = help_file
                        break
            else:
                # They are all None
                extra['image']['help'] = None
    def get_platform_metadata(self, platform, build_annotations):
        """
        Return the metadata for the given platform.
        """
        # retrieve all the workspace data
        cm_key, cm_frag_key = get_platform_config(platform, build_annotations)

        build_info = get_worker_build_info(self.workflow, platform)
        osbs = build_info.osbs
        try:
            cm_data = osbs.get_config_map(cm_key)
        except Exception:
            self.log.error("Failed to get ConfigMap for platform %s", platform)
            raise

        metadata = cm_data.get_data_by_key(cm_frag_key)
        return metadata
Example #10
    def set_media_types(self, extra, worker_metadatas):
        media_types = []
        if not self.source_build:
            for platform in worker_metadatas:
                annotations = get_worker_build_info(self.workflow,
                                                    platform).build.get_annotations()
                if annotations.get('media-types'):
                    media_types = json.loads(annotations['media-types'])
                    break

        # Append media_types from verify images
        media_results = self.workflow.exit_results.get(PLUGIN_VERIFY_MEDIA_KEY)
        if media_results:
            media_types += media_results

        if media_types:
            extra['image']['media_types'] = sorted(list(set(media_types)))
Example #11
    def set_media_types(self, extra, worker_metadatas):
        media_types = []

        # Set media_types for the base case
        super(KojiImportPlugin, self).set_media_types(extra, worker_metadatas)
        # Adjust media_types to include annotations
        for platform in worker_metadatas:
            annotations = get_worker_build_info(self.workflow,
                                                platform).build.get_annotations()
            if annotations.get('media-types'):
                media_types = json.loads(annotations['media-types'])
                break

        # Extend existing with new, if any; de-dupe and re-sort.
        if media_types:
            extra['image']['media_types'] = sorted(set(
                extra['image'].get('media_types', []) + media_types
            ))
    def delete_v1_layers(self, repo_prefix="redhat-"):
        annotations = self.workflow.build_result.annotations
        if not annotations:
            # No worker builds created
            return

        worker_builds = annotations['worker-builds']

        for platform in worker_builds:
            build_info = get_worker_build_info(self.workflow, platform)
            annotations = build_info.build.get_annotations() or {}
            v1_image_id = annotations.get('v1-image-id')
            if v1_image_id:
                image_names = self.workflow.tag_conf.images
                self.pulp_handler.create_dockpulp()
                if not repo_prefix:
                    repo_prefix = ''
                pulp_repos = set(['%s%s' % (repo_prefix, image.pulp_repo) for image in image_names])
                for repo_id in pulp_repos:
                    self.log.info("removing %s from repo %s", v1_image_id, repo_id)
                    self.pulp_handler.remove_image(repo_id, v1_image_id)
    def run(self):
        """
        Run the plugin.
        """

        metadatas = {}

        # get all the build annotations and labels from the orchestrator
        build_result = self.workflow.build_result

        annotations = build_result.annotations
        worker_builds = annotations['worker-builds']

        for platform, build_annotations in worker_builds.items():
            # retrieve all the workspace data
            build_info = get_worker_build_info(self.workflow, platform)
            osbs = build_info.osbs

            kind = "configmap/"
            cmlen = len(kind)
            cm_key_tmp = build_annotations['metadata_fragment']
            cm_frag_key = build_annotations['metadata_fragment_key']

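            # the metadata_fragment annotation is expected to look like
            # 'configmap/<name>'; skip this platform if either annotation
            # is missing or malformed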
            if not cm_key_tmp or not cm_frag_key or cm_key_tmp[:cmlen] != kind:
                self.log.warning("Bad ConfigMap annotations for platform %s",
                                 platform)
                continue

            # use the key to get the ConfigMap data, then use the fragment_key
            # to get the build metadata inside the ConfigMap data, and save
            # the worker build metadata
            cm_key = cm_key_tmp[cmlen:]
            cm_data = osbs.get_config_map(cm_key)
            metadata = cm_data.get_data_by_key(cm_frag_key)

            metadatas[platform] = metadata

            defer_removal(self.workflow, cm_key, osbs)

        return metadatas
    def run(self):
        """
        Run the plugin.
        """

        worker_builds = self.workflow.build_result.annotations['worker-builds']
        has_v1_image_id = None
        repo_tags = {}

        for platform in worker_builds:
            build_info = get_worker_build_info(self.workflow, platform)
            annotations = build_info.build.get_annotations()
            v1_image_id = annotations.get('v1-image-id')
            if v1_image_id:
                if has_v1_image_id:
                    msg = "two platforms with v1-image-ids: {0} and {1}".format(platform,
                                                                                has_v1_image_id)
                    raise RuntimeError(msg)
                has_v1_image_id = platform
                self.log.info("tagging v1-image-id %s for platform %s", v1_image_id, platform)
                ret_val = self.set_v1_tags(v1_image_id)
                if ret_val:
                    repo_tags = ret_val
        return repo_tags
Example #15
    def set_group_manifest_info(self, extra, worker_metadatas):
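        # When group_manifests produced a manifest list, record its tags,
        # pullspecs and digests under extra['image']['index']; otherwise
        # patch the x86_64 worker's docker-image output with the full tag
        # set, repositories and digests.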
        version_release = None
        primary_images = get_primary_images(self.workflow)
        floating_images = get_floating_images(self.workflow)
        unique_images = get_unique_images(self.workflow)
        if primary_images:
            version_release = primary_images[0].tag

        if is_scratch_build(self.workflow):
            tags = [image.tag for image in self.workflow.tag_conf.images]
            version_release = tags[0]
        else:
            assert version_release is not None, 'Unable to find version-release image'
            tags = [image.tag for image in primary_images]

        floating_tags = [image.tag for image in floating_images]
        unique_tags = [image.tag for image in unique_images]

        manifest_data = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY, {})
        if manifest_data and is_manifest_list(manifest_data.get("media_type")):
            manifest_digest = manifest_data.get("manifest_digest")
            index = {}
            index['tags'] = tags
            index['floating_tags'] = floating_tags
            index['unique_tags'] = unique_tags
            build_image = get_unique_images(self.workflow)[0]
            repo = ImageName.parse(build_image).to_str(registry=False, tag=False)
            # group_manifests added the registry, so this should be valid
            registries = self.workflow.push_conf.all_registries

            digest_version = get_manifest_media_version(manifest_digest)
            digest = manifest_digest.default

            for registry in registries:
                pullspec = "{0}/{1}@{2}".format(registry.uri, repo, digest)
                index['pull'] = [pullspec]
                pullspec = "{0}/{1}:{2}".format(registry.uri, repo,
                                                version_release)
                index['pull'].append(pullspec)

                # Store each digest with its corresponding media type
                index['digests'] = {}
                media_type = get_manifest_media_type(digest_version)
                index['digests'][media_type] = digest

                break
            extra['image']['index'] = index
        # group_manifests returns None if it didn't run, {} if group=False
        else:
            for platform in worker_metadatas:
                if platform == "x86_64":
                    for instance in worker_metadatas[platform]['output']:
                        if instance['type'] == 'docker-image':
                            # koji_upload, running in the worker, doesn't have the full tags
                            # so set them here
                            instance['extra']['docker']['tags'] = tags
                            instance['extra']['docker']['floating_tags'] = floating_tags
                            instance['extra']['docker']['unique_tags'] = unique_tags
                            repositories = []
                            for pullspec in instance['extra']['docker']['repositories']:
                                if '@' not in pullspec:
                                    image = ImageName.parse(pullspec)
                                    image.tag = version_release
                                    pullspec = image.to_str()

                                repositories.append(pullspec)

                            instance['extra']['docker']['repositories'] = repositories
                            self.log.debug("reset tags so that docker is %s",
                                           instance['extra']['docker'])
                            annotations = get_worker_build_info(self.workflow, platform).\
                                build.get_annotations()

                            digests = {}
                            if 'digests' in annotations:
                                digests = get_digests_map_from_annotations(annotations['digests'])
                                instance['extra']['docker']['digests'] = digests
def test_orchestrate_build(tmpdir, caplog, config_kwargs, worker_build_image, logs_return_bytes):
    workflow = mock_workflow(tmpdir)
    mock_osbs(logs_return_bytes=logs_return_bytes)
    mock_reactor_config(tmpdir)
    plugin_args = {
        'platforms': ['x86_64'],
        'build_kwargs': make_worker_build_kwargs(),
        'osbs_client_config': str(tmpdir),
    }
    if worker_build_image:
        plugin_args['worker_build_image'] = worker_build_image
    if config_kwargs is not None:
        plugin_args['config_kwargs'] = config_kwargs

    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': plugin_args
        }]
    )

    expected_kwargs = {
        'conf_section': 'worker_x86_64',
        'conf_file': tmpdir + '/osbs.conf',
    }
    # Update with config_kwargs last to ensure that, when set, it
    # always takes precedence over the worker_build_image param.
    if config_kwargs is not None:
        expected_kwargs.update(config_kwargs)
    expected_kwargs['build_image'] = 'some_image:latest'

    (flexmock(Configuration).should_call('__init__').with_args(**expected_kwargs).once())

    build_result = runner.run()
    assert not build_result.is_failed()

    assert (build_result.annotations == {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'worker-build-x86_64',
                    'cluster-url': 'https://worker_x86_64.com/',
                    'namespace': 'worker_x86_64_namespace'
                },
                'digests': [],
                'plugins-metadata': {}
            }
        }
    })

    assert (build_result.labels == {})

    build_info = get_worker_build_info(workflow, 'x86_64')
    assert build_info.osbs

    for record in caplog.records():
        if not record.name.startswith("atomic_reactor"):
            continue

        assert hasattr(record, 'arch')
        if record.funcName == 'watch_logs':
            assert record.arch == 'x86_64'
        else:
            assert record.arch == '-'
Example #17
def test_orchestrate_build_annotations_and_labels(tmpdir, metadata_fragment):
    workflow = mock_workflow(tmpdir)
    mock_osbs()

    md = {
        'metadata_fragment': 'configmap/spam-md',
        'metadata_fragment_key': 'metadata.json'
    }

    def mock_wait_for_build_to_finish(build_name):
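        # Fake OSBS.wait_for_build_to_finish: return a Complete build whose
        # annotations carry repositories and digests keyed by the build name.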
        annotations = {
            'repositories': json.dumps({
                'unique': ['{}-unique'.format(build_name)],
                'primary': ['{}-primary'.format(build_name)],
            }),
            'digests': json.dumps([
                {
                    'digest': 'sha256:{}-digest'.format(build_name),
                    'tag': '{}-latest'.format(build_name),
                    'registry': '{}-registry'.format(build_name),
                    'repository': '{}-repository'.format(build_name),
                },
            ]),
        }
        if metadata_fragment:
            annotations.update(md)

        labels = {'koji-build-id': 'koji-build-id'}
        return make_build_response(build_name, 'Complete', annotations, labels)
    (flexmock(OSBS)
        .should_receive('wait_for_build_to_finish')
        .replace_with(mock_wait_for_build_to_finish))

    mock_reactor_config(tmpdir)
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
            }
        }]
    )
    build_result = runner.run()
    assert not build_result.is_failed()

    expected = {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'worker-build-x86_64',
                    'cluster-url': 'https://worker_x86_64.com/',
                    'namespace': 'worker_x86_64_namespace'
                },
                'digests': [
                    {
                        'digest': 'sha256:worker-build-x86_64-digest',
                        'tag': 'worker-build-x86_64-latest',
                        'registry': 'worker-build-x86_64-registry',
                        'repository': 'worker-build-x86_64-repository',
                    },
                ],
                'plugins-metadata': {}
            },
            'ppc64le': {
                'build': {
                    'build-name': 'worker-build-ppc64le',
                    'cluster-url': 'https://worker_ppc64le.com/',
                    'namespace': 'worker_ppc64le_namespace'
                },
                'digests': [
                    {
                        'digest': 'sha256:worker-build-ppc64le-digest',
                        'tag': 'worker-build-ppc64le-latest',
                        'registry': 'worker-build-ppc64le-registry',
                        'repository': 'worker-build-ppc64le-repository',
                    },
                ],
                'plugins-metadata': {}
            },
        },
        'repositories': {
            'unique': [
                'worker-build-ppc64le-unique',
                'worker-build-x86_64-unique',
            ],
            'primary': [
                'worker-build-ppc64le-primary',
                'worker-build-x86_64-primary',
            ],
        },
    }
    if metadata_fragment:
        expected['worker-builds']['x86_64'].update(md)
        expected['worker-builds']['ppc64le'].update(md)

    assert (build_result.annotations == expected)

    assert (build_result.labels == {'koji-build-id': 'koji-build-id'})

    build_info = get_worker_build_info(workflow, 'x86_64')
    assert build_info.osbs

    koji_upload_dir = get_koji_upload_dir(workflow)
    assert koji_upload_dir
Example #18
def test_orchestrate_build(tmpdir, caplog, config_kwargs, worker_build_image, logs_return_bytes):
    workflow = mock_workflow(tmpdir)
    mock_osbs(logs_return_bytes=logs_return_bytes)
    mock_reactor_config(tmpdir)
    plugin_args = {
        'platforms': ['x86_64'],
        'build_kwargs': make_worker_build_kwargs(),
        'osbs_client_config': str(tmpdir),
    }
    if worker_build_image:
        plugin_args['worker_build_image'] = worker_build_image
    if config_kwargs is not None:
        plugin_args['config_kwargs'] = config_kwargs

    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': plugin_args
        }]
    )

    expected_kwargs = {
        'conf_section': 'worker_x86_64',
        'conf_file': tmpdir + '/osbs.conf',
    }
    if worker_build_image:
        expected_kwargs['build_image'] = worker_build_image
    # Update with config_kwargs last to ensure that, when set, it
    # always takes precedence over the worker_build_image param.
    if config_kwargs is not None:
        expected_kwargs.update(config_kwargs)

    (flexmock(Configuration).should_call('__init__').with_args(**expected_kwargs).once())

    build_result = runner.run()
    assert not build_result.is_failed()

    assert (build_result.annotations == {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'worker-build-x86_64',
                    'cluster-url': 'https://worker_x86_64.com/',
                    'namespace': 'worker_x86_64_namespace'
                },
                'digests': [],
                'plugins-metadata': {}
            }
        }
    })

    assert (build_result.labels == {})

    build_info = get_worker_build_info(workflow, 'x86_64')
    assert build_info.osbs

    for record in caplog.records():
        if not record.name.startswith("atomic_reactor"):
            continue

        assert hasattr(record, 'arch')
        if record.funcName == 'watch_logs':
            assert record.arch == 'x86_64'
        else:
            assert record.arch == '-'
def test_orchestrate_build_annotations_and_labels(tmpdir, metadata_fragment):
    workflow = mock_workflow(tmpdir)
    mock_osbs()

    md = {
        'metadata_fragment': 'configmap/spam-md',
        'metadata_fragment_key': 'metadata.json'
    }

    def mock_wait_for_build_to_finish(build_name):
        annotations = {
            'repositories': json.dumps({
                'unique': ['{}-unique'.format(build_name)],
                'primary': ['{}-primary'.format(build_name)],
            }),
            'digests': json.dumps([
                {
                    'digest': 'sha256:{}-digest'.format(build_name),
                    'tag': '{}-latest'.format(build_name),
                    'registry': '{}-registry'.format(build_name),
                    'repository': '{}-repository'.format(build_name),
                },
            ]),
        }
        if metadata_fragment:
            annotations.update(md)

        labels = {'koji-build-id': 'koji-build-id'}
        return make_build_response(build_name, 'Complete', annotations, labels)
    (flexmock(OSBS)
        .should_receive('wait_for_build_to_finish')
        .replace_with(mock_wait_for_build_to_finish))

    mock_reactor_config(tmpdir)
    runner = BuildStepPluginsRunner(
        workflow.builder.tasker,
        workflow,
        [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': ['x86_64', 'ppc64le'],
                'build_kwargs': make_worker_build_kwargs(),
                'osbs_client_config': str(tmpdir),
                'max_cluster_fails': 2,
                'unreachable_cluster_retry_delay': .1
            }
        }]
    )
    build_result = runner.run()
    assert not build_result.is_failed()

    expected = {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'worker-build-x86_64',
                    'cluster-url': 'https://worker_x86_64.com/',
                    'namespace': 'worker_x86_64_namespace'
                },
                'digests': [
                    {
                        'digest': 'sha256:worker-build-x86_64-digest',
                        'tag': 'worker-build-x86_64-latest',
                        'registry': 'worker-build-x86_64-registry',
                        'repository': 'worker-build-x86_64-repository',
                    },
                ],
                'plugins-metadata': {}
            },
            'ppc64le': {
                'build': {
                    'build-name': 'worker-build-ppc64le',
                    'cluster-url': 'https://worker_ppc64le.com/',
                    'namespace': 'worker_ppc64le_namespace'
                },
                'digests': [
                    {
                        'digest': 'sha256:worker-build-ppc64le-digest',
                        'tag': 'worker-build-ppc64le-latest',
                        'registry': 'worker-build-ppc64le-registry',
                        'repository': 'worker-build-ppc64le-repository',
                    },
                ],
                'plugins-metadata': {}
            },
        },
        'repositories': {
            'unique': [
                'worker-build-ppc64le-unique',
                'worker-build-x86_64-unique',
            ],
            'primary': [
                'worker-build-ppc64le-primary',
                'worker-build-x86_64-primary',
            ],
        },
    }
    if metadata_fragment:
        expected['worker-builds']['x86_64'].update(md)
        expected['worker-builds']['ppc64le'].update(md)

    assert (build_result.annotations == expected)

    assert (build_result.labels == {'koji-build-id': 'koji-build-id'})

    build_info = get_worker_build_info(workflow, 'x86_64')
    assert build_info.osbs

    koji_upload_dir = get_koji_upload_dir(workflow)
    assert koji_upload_dir
Example #20
    def set_group_manifest_info(self, extra, worker_metadatas):
        version_release = None
        primary_images = get_primary_images(self.workflow)
        for image in primary_images:
            if '-' in image.tag:  # {version}-{release} only, and only one instance
                version_release = image.tag
                break

        assert version_release is not None, 'Unable to find version-release image'
        tags = [image.tag for image in primary_images]

        manifest_list_digests = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY)
        if manifest_list_digests:
            index = {}
            index['tags'] = tags
            repositories = self.workflow.build_result.annotations['repositories']['unique']
            repo = ImageName.parse(repositories[0]).to_str(registry=False, tag=False)
            # group_manifests added the registry, so this should be valid
            registries = self.workflow.push_conf.pulp_registries
            if not registries:
                registries = self.workflow.push_conf.all_registries
            for registry in registries:
                manifest_list_digest = manifest_list_digests[repo]
                pullspec = "{0}/{1}@{2}".format(registry.uri, repo, manifest_list_digest.default)
                index['pull'] = [pullspec]
                pullspec = "{0}/{1}:{2}".format(registry.uri, repo,
                                                version_release)
                index['pull'].append(pullspec)

                # Store each digest with its corresponding media type
                index['digests'] = {}
                for version, digest in manifest_list_digest.items():
                    if digest:
                        media_type = get_manifest_media_type(version)
                        index['digests'][media_type] = digest
                break
            extra['image']['index'] = index
        # group_manifests returns None if it didn't run, {} if group=False
        else:
            for platform in worker_metadatas:
                if platform == "x86_64":
                    for instance in worker_metadatas[platform]['output']:
                        if instance['type'] == 'docker-image':
                            # koji_upload, running in the worker, doesn't have the full tags
                            # so set them here
                            instance['extra']['docker']['tags'] = tags
                            repositories = []
                            for pullspec in instance['extra']['docker']['repositories']:
                                if '@' not in pullspec:
                                    image = ImageName.parse(pullspec)
                                    image.tag = version_release
                                    pullspec = image.to_str()

                                repositories.append(pullspec)

                            instance['extra']['docker']['repositories'] = repositories
                            self.log.debug("reset tags so that docker is %s",
                                           instance['extra']['docker'])
                            annotations = get_worker_build_info(self.workflow, platform).\
                                build.get_annotations()
                            digests = {}
                            if 'digests' in annotations:
                                digests = get_digests_map_from_annotations(annotations['digests'])
                                instance['extra']['docker']['digests'] = digests
    def set_group_manifest_info(self, extra, worker_metadatas):
        version_release = None
        primary_images = get_primary_images(self.workflow)
        for image in primary_images:
            if '-' in image.tag:  # {version}-{release} only, and only one instance
                version_release = image.tag
                break

        assert version_release is not None, 'Unable to find version-release image'
        tags = [image.tag for image in primary_images]

        manifest_list_digests = self.workflow.postbuild_results.get(
            PLUGIN_GROUP_MANIFESTS_KEY)
        if manifest_list_digests:
            index = {}
            index['tags'] = tags
            repositories = self.workflow.build_result.annotations[
                'repositories']['unique']
            repo = ImageName.parse(repositories[0]).to_str(registry=False,
                                                           tag=False)
            # group_manifests added the registry, so this should be valid
            registries = self.workflow.push_conf.pulp_registries
            if not registries:
                registries = self.workflow.push_conf.all_registries
            for registry in registries:
                manifest_list_digest = manifest_list_digests[repo]
                pullspec = "{0}/{1}@{2}".format(registry.uri, repo,
                                                manifest_list_digest.default)
                index['pull'] = [pullspec]
                pullspec = "{0}/{1}:{2}".format(registry.uri, repo,
                                                version_release)
                index['pull'].append(pullspec)

                # Store each digest with its corresponding media type
                index['digests'] = {}
                for version, digest in manifest_list_digest.items():
                    if digest:
                        media_type = get_manifest_media_type(version)
                        index['digests'][media_type] = digest
                break
            extra['image']['index'] = index
        # group_manifests returns None if it didn't run, {} if group=False
        else:
            for platform in worker_metadatas:
                if platform == "x86_64":
                    for instance in worker_metadatas[platform]['output']:
                        if instance['type'] == 'docker-image':
                            # koji_upload, running in the worker, doesn't have the full tags
                            # so set them here
                            instance['extra']['docker']['tags'] = tags
                            repositories = []
                            for pullspec in instance['extra']['docker'][
                                    'repositories']:
                                if '@' not in pullspec:
                                    image = ImageName.parse(pullspec)
                                    image.tag = version_release
                                    pullspec = image.to_str()

                                repositories.append(pullspec)

                            instance['extra']['docker'][
                                'repositories'] = repositories
                            self.log.debug(
                                "reset tags to so that docker is %s",
                                instance['extra']['docker'])
                            annotations = get_worker_build_info(self.workflow, platform).\
                                build.get_annotations()
                            digests = {}
                            if 'digests' in annotations:
                                digests = get_digests_map_from_annotations(
                                    annotations['digests'])
                                instance['extra']['docker'][
                                    'digests'] = digests