def _store_manifest_digest(self, image: ImageName, use_original_tag: bool) -> None:
        """Record the digest and media type of a parent image's manifest.

        The manifest list digest is preferred; when no manifest list exists,
        the v2 schema 2 manifest digest is stored instead. The result is
        written into ``workflow.data.parent_images_digests``.

        :param image: ImageName, parent image to look up
        :param use_original_tag: bool, key the stored data by the original
            base-image tag instead of the (possibly replaced) current tag
        :raises RuntimeError: when neither a manifest list nor a v2 schema 2
            manifest can be fetched for the image
        """
        image_str = image.to_str()
        manifest_list = self._get_manifest_list(image)
        registry_client = self._get_registry_client(image.registry)

        if manifest_list:
            media_type = get_manifest_media_type('v2_list')
            checksums = get_checksums(BytesIO(manifest_list.content), ['sha256'])
        else:
            media_type = get_manifest_media_type('v2')
            all_manifests = registry_client.get_all_manifests(image, versions=('v2',))
            try:
                v2_response = all_manifests['v2']
            except KeyError as exc:
                raise RuntimeError(
                    'Unable to fetch manifest list or '
                    'v2 schema 2 digest for {} (Does image exist?)'.format(image_str)
                ) from exc
            checksums = get_checksums(BytesIO(v2_response.content), ['sha256'])

        parent_digests = {media_type: 'sha256:{}'.format(checksums['sha256sum'])}

        if use_original_tag:
            # image tag may have been replaced with a ref for autorebuild; use original tag
            # to simplify fetching parent_images_digests data in other plugins
            image = image.copy()
            base_image_key: ImageName = self.workflow.data.dockerfile_images.base_image_key
            image.tag = base_image_key.tag
            image_str = image.to_str()

        self.workflow.data.parent_images_digests[image_str] = parent_digests
# Example #2
    def _fetch_manifest_digest(self, image: ImageName) -> Dict[str, str]:
        """Return a ``{media_type: digest}`` mapping for *image*.

        The manifest list digest is preferred; when no manifest list is
        available, the v2 schema 2 manifest digest is used.

        :raises RuntimeError: when neither kind of manifest can be fetched
        """
        image_str = image.to_str()
        manifest_list = self._get_manifest_list(image)
        client = self._get_registry_client(image.registry)

        if not manifest_list:
            all_manifests = client.get_all_manifests(image, versions=('v2', ))
            media_type = get_manifest_media_type('v2')
            try:
                response = all_manifests['v2']
            except KeyError as exc:
                raise RuntimeError(
                    'Unable to fetch manifest list or '
                    'v2 schema 2 digest for {} (Does image exist?)'.format(
                        image_str)) from exc
            checksum_info = get_checksums(BytesIO(response.content), ['sha256'])
        else:
            media_type = get_manifest_media_type('v2_list')
            checksum_info = get_checksums(BytesIO(manifest_list.content), ['sha256'])

        return {media_type: 'sha256:{}'.format(checksum_info['sha256sum'])}
# Example #3
    def push_with_skopeo(self, image: Dict[str, str], registry_image: ImageName, insecure: bool,
                         docker_push_secret: str) -> None:
        """Copy a built image archive to the registry with ``skopeo copy``.

        :param image: dict describing the image; must contain 'type' and
            'path' ('ref_name' as well for OCI images)
        :param registry_image: ImageName, destination reference in the registry
        :param insecure: bool, disable TLS verification of the destination
        :param docker_push_secret: str, push secret name, or None for no auth
        :raises RuntimeError: for an unsupported image type
        :raises subprocess.CalledProcessError: when skopeo fails
        """
        cmd = ['skopeo', 'copy']
        if docker_push_secret is not None:
            cmd.append('--authfile=' + Dockercfg(docker_push_secret).json_secret_path)

        if insecure:
            cmd.append('--dest-tls-verify=false')

        image_type = image['type']
        if image_type == IMAGE_TYPE_OCI:
            # ref_name is added by 'flatpak_create_oci'
            # we have to be careful when changing the source container image type
            # since assumption here is that source container image will always be 'docker-archive'
            source_img = 'oci:{path}:{ref_name}'.format(**image)
            # Convert the OCI layout to a v2 schema 2 image on push.
            cmd.append('--format=v2s2')
        elif image_type == IMAGE_TYPE_DOCKER_ARCHIVE:
            source_img = 'docker-archive://{path}'.format(**image)
        else:
            raise RuntimeError("Attempt to push unsupported image type %s with skopeo" %
                               image_type)

        cmd.extend([source_img, 'docker://' + registry_image.to_str()])

        try:
            retries.run_cmd(cmd)
        except subprocess.CalledProcessError as e:
            self.log.error("push failed with output:\n%s", e.output)
            raise
    def _get_image_with_digest(self, image: ImageName) -> Optional[ImageName]:
        """Return *image* pinned to its recorded sha256 digest.

        Looks up digest data previously stored in
        ``workflow.data.parent_images_digests``; the manifest list digest is
        preferred over the v2 manifest digest. Returns None when no usable
        digest is recorded for the image.
        """
        try:
            metadata = self.workflow.data.parent_images_digests[image.to_str()]
        except KeyError:
            return None

        for version in ('v2_list', 'v2'):
            raw_digest = metadata.get(get_manifest_media_type(version))
            if raw_digest:
                break
        else:
            return None

        digest_value = raw_digest.split(':', 1)[1]
        pinned = '{}@sha256:{}'.format(image.to_str(tag=False), digest_value)
        return ImageName.parse(pinned)
# Example #5
    def _pin_to_digest(self, image: ImageName,
                       digests: Dict[str, str]) -> ImageName:
        """Return *image* referenced by digest instead of tag.

        :param image: ImageName to pin
        :param digests: mapping of media type to digest; one of the v2_list
            or v2 media types *must* be present
        """
        # one of v2_list, v2 *must* be present in the dict
        raw_digest = (digests.get(get_manifest_media_type('v2_list'))
                      or digests[get_manifest_media_type('v2')])
        pinned_ref = '{}@sha256:{}'.format(image.to_str(tag=False),
                                           raw_digest.split(':', 1)[1])
        return ImageName.parse(pinned_ref)
# Example #6
    def check_manifest_digest(self, image: ImageName,
                              build_info: Dict[str, Any]) -> None:
        """Check if the manifest list digest is correct.

        Compares the manifest list digest with the value in koji metadata.
        Raises a ValueError if the manifest list does not refer to the koji build.

        :param image: ImageName, image to inspect
        :param build_info: dict, koji build metadata
        """
        image_str = image.to_str()
        image_digest_data = self.workflow.data.parent_images_digests[image_str]

        # Prefer the manifest list digest; fall back to the v2 manifest digest.
        for version in ('v2_list', 'v2'):
            media_type = get_manifest_media_type(version)
            if media_type in image_digest_data:
                break
        else:
            # This should not happen - raise just to be safe:
            raise RuntimeError('Unexpected parent image digest data for {}. '
                               'v2 or v2_list expected, got {}'.format(
                                   image, image_digest_data))

        digest = image_digest_data[media_type]

        try:
            expected_digest = (
                build_info['extra']['image']['index']['digests'][media_type])
        except KeyError as exc:
            err_msg = (
                "Koji build ({}) for parent image '{}' does not have manifest digest data "
                "for the expected media type '{}'. This parent image MUST be rebuilt"
                .format(build_info['id'], image_str, media_type))
            self.log.error(err_msg)
            raise ValueError(err_msg) from exc

        self.log.info(
            'Verifying manifest digest (%s) for parent %s against its '
            'koji reference (%s)', digest, image_str, expected_digest)
        if digest == expected_digest:
            return

        rebuild_msg = 'This parent image MUST be rebuilt'
        mismatch_msg = (
            'Manifest digest (%s) for parent image %s does not match value in its '
            'koji reference (%s). %s')
        if not self._deep_manifest_list_inspection:
            self.log.error(mismatch_msg, digest, image_str,
                           expected_digest, rebuild_msg)
            raise ValueError(
                mismatch_msg %
                (digest, image_str, expected_digest, rebuild_msg))

        # Digests differ, but the lists may still describe the same images:
        # inspect the manifest list contents before failing the build.
        self.log.warning(mismatch_msg, digest, image_str, expected_digest,
                         'Checking manifest list contents...')
        if not self.manifest_list_entries_match(image, build_info['id']):
            err_msg = (
                'Manifest list for parent image %s differs from the manifest list for '
                'its koji reference. %s')
            self.log.error(err_msg, image_str, rebuild_msg)
            raise ValueError(err_msg % (image_str, rebuild_msg))
# Example #7
def get_output(workflow: DockerBuildWorkflow,
               buildroot_id: str,
               pullspec: ImageName,
               platform: str,
               source_build: bool = False):
    """
    Build the 'output' section of the koji build metadata.

    :param workflow: DockerBuildWorkflow, the current build workflow
    :param buildroot_id: str, buildroot_id
    :param pullspec: ImageName, pullspec of the image being described
    :param platform: str, output platform (replaced with the local machine
        arch for source builds)
    :param source_build: bool, is source_build ?
    :return: tuple, list of Output instances, and extra Output file
    """
    def add_buildroot_id(output: Output) -> Output:
        # Stamp an output with the buildroot it was produced in.
        output.metadata.update({'buildroot_id': buildroot_id})
        return output

    extra_output_file = None
    output_files: List[Output] = []
    image_id: str

    if source_build:
        manifest = workflow.data.koji_source_manifest
        image_id = manifest['config']['digest']
        # we are using digest from manifest, because we can't get diff_ids
        # unless we pull image, which would fail because there are so many layers
        layer_sizes = [{
            'digest': layer['digest'],
            'size': layer['size']
        } for layer in manifest['layers']]
        # source builds run on a single host; report the local machine arch
        platform = os.uname()[4]

    else:
        imageutil = workflow.imageutil
        image_id = imageutil.get_inspect_for_image(pullspec,
                                                   platform=platform)['Id']

        # parent_id stays None for FROM scratch builds
        parent_id = None
        if not workflow.data.dockerfile_images.base_from_scratch:
            parent_id = imageutil.base_image_inspect(platform)['Id']

        image_archive = str(
            workflow.build_dir.platform_dir(platform).exported_squashed_image)
        layer_sizes = imageutil.get_uncompressed_image_layer_sizes(
            image_archive)

    # Fetch manifest digests from the registry the image was pushed to.
    digests = get_manifest_digests(pullspec, workflow.conf.registry['uri'],
                                   workflow.conf.registry['insecure'],
                                   workflow.conf.registry.get('secret', None))

    # Prefer the v2 manifest for fetching the image config; fall back to OCI.
    if digests.v2:
        config_manifest_digest = digests.v2
        config_manifest_type = 'v2'
    else:
        config_manifest_digest = digests.oci
        config_manifest_type = 'oci'

    config = get_config_from_registry(
        pullspec, workflow.conf.registry['uri'], config_manifest_digest,
        workflow.conf.registry['insecure'],
        workflow.conf.registry.get('secret', None), config_manifest_type)

    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    # Record both the tag-based and digest-based pullspecs.
    digest_pullspec = f"{pullspec.to_str(tag=False)}@{select_digest(digests)}"
    repositories = [pullspec.to_str(), digest_pullspec]

    # Map media types to digests; v1 digests are deliberately excluded.
    typed_digests = {
        get_manifest_media_type(version): digest
        for version, digest in digests.items() if version != "v1"
    }

    tag_conf = workflow.data.tag_conf
    if source_build:
        image_type = IMAGE_TYPE_DOCKER_ARCHIVE
        tags = sorted(set(image.tag for image in tag_conf.images))
    else:
        # image type comes from the archive exported by the fetch plugin
        image_metadatas = workflow.data.postbuild_results[
            FetchDockerArchivePlugin.key]
        image_type = image_metadatas[platform]["type"]
        tags = sorted(
            image.tag
            for image in tag_conf.get_unique_images_with_platform(platform))

    metadata, output = get_image_output(image_type, image_id, platform,
                                        pullspec)

    metadata.update({
        'arch': platform,
        'type': 'docker-image',
        'components': [],
        'extra': {
            'image': {
                'arch': platform,
            },
            'docker': {
                'id': image_id,
                'repositories': repositories,
                'layer_sizes': layer_sizes,
                'tags': tags,
                'config': config,
                'digests': typed_digests,
            },
        },
    })

    if not config:
        del metadata['extra']['docker']['config']

    if not source_build:
        metadata['components'] = get_image_components(workflow, platform)

        if not workflow.data.dockerfile_images.base_from_scratch:
            metadata['extra']['docker']['parent_id'] = parent_id

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)

    # when doing regular build, worker already uploads image,
    # so orchestrator needs only metadata,
    # but source container build didn't upload that image yet,
    # so we want metadata, and the image to upload
    # NOTE(review): for source builds a metadata dict is appended to
    # output_files (annotated List[Output]) — confirm callers expect this.
    if source_build:
        output_files.append(metadata)
        extra_output_file = output
    else:
        output_files.append(image)

    if not source_build:
        # add operator manifests to output
        operator_manifests_path = (workflow.data.postbuild_results.get(
            PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY))
        if operator_manifests_path:
            manifests_metadata = get_output_metadata(
                operator_manifests_path, OPERATOR_MANIFESTS_ARCHIVE)
            operator_manifests_output = Output(
                filename=operator_manifests_path, metadata=manifests_metadata)
            add_custom_type(operator_manifests_output,
                            KOJI_BTYPE_OPERATOR_MANIFESTS)

            operator_manifests = add_buildroot_id(operator_manifests_output)
            output_files.append(operator_manifests)

    return output_files, extra_output_file
# Example #8
def get_output(workflow: DockerBuildWorkflow,
               buildroot_id: str,
               pullspec: ImageName,
               platform: str,
               source_build: bool = False):
    """
    Build the 'output' section of the koji build metadata.

    :param workflow: DockerBuildWorkflow, the current build workflow
    :param buildroot_id: str, buildroot_id
    :param pullspec: ImageName, pullspec of the image being described
    :param platform: str, output platform (replaced with the local machine
        arch for source builds)
    :param source_build: bool, is source_build ?
    :return: tuple, list of Output instances, and extra Output file
    """
    def add_buildroot_id(output: Output) -> Output:
        # Stamp an output with the buildroot it was produced in.
        output.metadata.update({'buildroot_id': buildroot_id})
        return output

    extra_output_file = None
    output_files: List[Output] = []
    image_id: str

    if source_build:
        manifest = workflow.data.koji_source_manifest
        image_id = manifest['config']['digest']
        # we are using digest from manifest, because we can't get diff_ids
        # unless we pull image, which would fail because there are so many layers
        layer_sizes = [{
            'digest': layer['digest'],
            'size': layer['size']
        } for layer in manifest['layers']]
        # source builds run on a single host; report the local machine arch
        platform = os.uname()[4]

    else:
        imageutil = workflow.imageutil
        image_id = imageutil.get_inspect_for_image(pullspec,
                                                   platform=platform)['Id']

        # inspect may be falsy (e.g. FROM scratch); parent_id stays None then
        inspect = imageutil.base_image_inspect(platform)
        parent_id = inspect['Id'] if inspect else None

        image_archive = str(
            workflow.build_dir.platform_dir(platform).exported_squashed_image)
        layer_sizes = imageutil.get_uncompressed_image_layer_sizes(
            image_archive)

    # Fetch manifest digests from the registry the image was pushed to.
    digests = get_manifest_digests(pullspec, workflow.conf.registry['uri'],
                                   workflow.conf.registry['insecure'],
                                   workflow.conf.registry.get('secret', None))

    # Prefer the v2 manifest for fetching the image config; fall back to OCI.
    if digests.v2:
        config_manifest_digest = digests.v2
        config_manifest_type = 'v2'
    else:
        config_manifest_digest = digests.oci
        config_manifest_type = 'oci'

    config = get_config_from_registry(
        pullspec, workflow.conf.registry['uri'], config_manifest_digest,
        workflow.conf.registry['insecure'],
        workflow.conf.registry.get('secret', None), config_manifest_type)

    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    # Record both the tag-based and digest-based pullspecs.
    digest_pullspec = f"{pullspec.to_str(tag=False)}@{select_digest(digests)}"
    repositories = [pullspec.to_str(), digest_pullspec]

    # Map media types to digests; v1 digests are deliberately excluded.
    typed_digests = {
        get_manifest_media_type(version): digest
        for version, digest in digests.items() if version != "v1"
    }

    tag_conf = workflow.data.tag_conf
    if source_build:
        tags = sorted(set(image.tag for image in tag_conf.images))
    else:
        tags = sorted(
            image.tag
            for image in tag_conf.get_unique_images_with_platform(platform))

    # since we are storing oci image as v2s2 all images now have 'docker-archive' type
    metadata, output = get_image_output(IMAGE_TYPE_DOCKER_ARCHIVE, image_id,
                                        platform, pullspec)

    metadata.update({
        'arch': platform,
        'type': 'docker-image',
        'components': [],
        'extra': {
            'image': {
                'arch': platform,
            },
            'docker': {
                'id': image_id,
                'repositories': repositories,
                'layer_sizes': layer_sizes,
                'tags': tags,
                'config': config,
                'digests': typed_digests,
            },
        },
    })

    if not config:
        del metadata['extra']['docker']['config']

    if not source_build:
        metadata['components'] = get_image_components(workflow.data, platform)

        if parent_id is not None:
            metadata['extra']['docker']['parent_id'] = parent_id

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)

    # Regular builds upload the image separately, so only metadata is
    # returned; source builds still need the image itself uploaded.
    # NOTE(review): for source builds a metadata dict is appended to
    # output_files (annotated List[Output]) — confirm callers expect this.
    if source_build:
        output_files.append(metadata)
        extra_output_file = output
    else:
        output_files.append(image)

    return output_files, extra_output_file