def check_manifest_digest(self, image, build_info):
    """Verify a parent image's registry digest against its koji build metadata.

    :param image: ImageName, parent image to verify
    :param build_info: dict, koji build metadata for the parent image
    :raises RuntimeError: if the stored digest data has an unexpected media type
    :raises ValueError: if koji has no digest for the media type, or the digests differ
    """
    image_str = image.to_str()
    v2_list_type = get_manifest_media_type('v2_list')
    v2_type = get_manifest_media_type('v2')
    image_digest_data = self.workflow.builder.parent_images_digests[image_str]
    # Prefer the manifest list digest; fall back to the v2 schema 2 digest.
    if v2_list_type in image_digest_data:
        media_type = v2_list_type
    elif v2_type in image_digest_data:
        media_type = v2_type
    else:
        # This should not happen - raise just to be safe:
        raise RuntimeError('Unexpected parent image digest data for {}. '
                           'v2 or v2_list expected, got {}'.format(image, image_digest_data))

    digest = image_digest_data[media_type]

    try:
        koji_digest = build_info['extra']['image']['index']['digests'][media_type]
    except KeyError:
        err_msg = ("Koji build ({}) for parent image '{}' does not have manifest digest data "
                   "for the expected media type '{}'. This parent image MUST be rebuilt"
                   .format(build_info['id'], image_str, media_type))
        self.log.error(err_msg)
        raise ValueError(err_msg)

    expected_digest = koji_digest
    # Lazy %-style args: formatting happens only if the record is emitted.
    self.log.info('Verifying manifest digest (%s) for parent %s against its '
                  'koji reference (%s)', digest, image_str, expected_digest)
    if digest != expected_digest:
        err_msg = ('Manifest digest ({}) for parent image {} does not match value in its '
                   'koji reference ({}). This parent image MUST be rebuilt'
                   .format(digest, image_str, expected_digest))
        self.log.error(err_msg)
        raise ValueError(err_msg)
def _fetch_manifest_digest(self, image: ImageName) -> Dict[str, str]:
    """Fetch media type and digest for manifest list or v2 schema 2 manifest digest"""
    image_str = image.to_str()
    manifest_list = self._get_manifest_list(image)
    reg_client = self._get_registry_client(image.registry)

    if manifest_list:
        # The manifest list itself is the digest source.
        media_type = get_manifest_media_type('v2_list')
        checksums = get_checksums(BytesIO(manifest_list.content), ['sha256'])
    else:
        # No manifest list: fall back to the v2 schema 2 manifest.
        media_type = get_manifest_media_type('v2')
        all_manifests = reg_client.get_all_manifests(image, versions=('v2', ))
        try:
            v2_response = all_manifests['v2']
        except KeyError as exc:
            raise RuntimeError(
                'Unable to fetch manifest list or '
                'v2 schema 2 digest for {} (Does image exist?)'.format(
                    image_str)) from exc
        checksums = get_checksums(BytesIO(v2_response.content), ['sha256'])

    # Either branch leaves the sha256 checksum of the chosen manifest.
    return {media_type: 'sha256:{}'.format(checksums['sha256sum'])}
def _store_manifest_digest(self, image, use_original_tag):
    """Store media type and digest for manifest list or v2 schema 2 manifest digest"""
    image_str = image.to_str()
    manifest_list = self._get_manifest_list(image)
    reg_client = self._get_registry_client(image.registry)

    if manifest_list:
        media_type = get_manifest_media_type('v2_list')
        checksums = get_checksums(BytesIO(manifest_list.content), ['sha256'])
    else:
        media_type = get_manifest_media_type('v2')
        all_manifests = reg_client.get_all_manifests(image, versions=('v2', ))
        try:
            v2_response = all_manifests['v2']
        except KeyError as exc:
            raise RuntimeError(
                'Unable to fetch manifest list or '
                'v2 schema 2 digest for {} (Does image exist?)'.format(
                    image_str)) from exc
        checksums = get_checksums(BytesIO(v2_response.content), ['sha256'])

    parent_digests = {media_type: 'sha256:{}'.format(checksums['sha256sum'])}

    if use_original_tag:
        # image tag may have been replaced with a ref for autorebuild; use original tag
        # to simplify fetching parent_images_digests data in other plugins
        image = image.copy()
        image.tag = self.workflow.builder.dockerfile_images.base_image_key.tag
        image_str = image.to_str()

    self.workflow.builder.parent_images_digests[image_str] = parent_digests
def _pin_to_digest(self, image: ImageName, digests: Dict[str, str]) -> ImageName:
    """Return *image* referenced by sha256 digest instead of by tag.

    :param image: ImageName to pin
    :param digests: dict, media type -> digest string ("sha256:<hex>")
    :return: ImageName with a digest reference
    """
    v2_list_type = get_manifest_media_type('v2_list')
    v2_type = get_manifest_media_type('v2')
    # one of v2_list, v2 *must* be present in the dict
    preferred = digests.get(v2_list_type) or digests[v2_type]
    hex_part = preferred.split(':', 1)[1]
    pinned = '{}@sha256:{}'.format(image.to_str(tag=False), hex_part)
    return ImageName.parse(pinned)
def check_manifest_digest(self, image, build_info):
    """Check if the manifest list digest is correct.

    Compares the manifest list digest with the value in koji metadata.
    Raises a ValueError if the manifest list does not refer to the koji build.

    :param image: ImageName, image to inspect
    :param build_info: dict, koji build metadata
    """
    image_str = image.to_str()
    v2_list_type = get_manifest_media_type('v2_list')
    v2_type = get_manifest_media_type('v2')
    image_digest_data = self.workflow.builder.parent_images_digests[image_str]
    # Prefer the manifest list digest; fall back to the v2 schema 2 digest.
    if v2_list_type in image_digest_data:
        media_type = v2_list_type
    elif v2_type in image_digest_data:
        media_type = v2_type
    else:
        # This should not happen - raise just to be safe:
        raise RuntimeError('Unexpected parent image digest data for {}. '
                           'v2 or v2_list expected, got {}'.format(image, image_digest_data))
    digest = image_digest_data[media_type]
    try:
        koji_digest = build_info['extra']['image']['index']['digests'][media_type]
    except KeyError:
        err_msg = ("Koji build ({}) for parent image '{}' does not have manifest digest data "
                   "for the expected media type '{}'. This parent image MUST be rebuilt"
                   .format(build_info['id'], image_str, media_type))
        self.log.error(err_msg)
        raise ValueError(err_msg)
    expected_digest = koji_digest
    self.log.info('Verifying manifest digest (%s) for parent %s against its '
                  'koji reference (%s)', digest, image_str, expected_digest)
    if digest != expected_digest:
        rebuild_msg = 'This parent image MUST be rebuilt'
        # %-template shared by the log call and the raised error message
        mismatch_msg = ('Manifest digest (%s) for parent image %s does not match value in its '
                        'koji reference (%s). %s')
        if not self._deep_manifest_list_inspection:
            self.log.error(mismatch_msg, digest, image_str, expected_digest, rebuild_msg)
            raise ValueError(mismatch_msg % (digest, image_str, expected_digest, rebuild_msg))
        # A mismatching digest may still reference the same per-arch manifests;
        # fall through to a per-entry comparison before failing the build.
        deep_inspection_msg = 'Checking manifest list contents...'
        self.log.warning(mismatch_msg, digest, image_str, expected_digest, deep_inspection_msg)
        if not self.manifest_list_entries_match(image, build_info['id']):
            err_msg = ('Manifest list for parent image %s differs from the manifest list for '
                       'its koji reference. %s')
            self.log.error(err_msg, image_str, rebuild_msg)
            raise ValueError(err_msg % (image_str, rebuild_msg))
def sort_annotations(self, all_annotations):
    """Group worker digests by registry and platform, then pick one per pair.

    For each registry/platform combination the digest entry with the lexically
    smallest repository name is selected.

    :param all_annotations: dict, platform -> worker annotation data
    :return: dict, registry -> {platform: digest entry}
    :raises RuntimeError: if a registry lacks digests for some platform
    """
    expected_platforms = set(all_annotations)
    by_registry = {}
    for platform, annotation in all_annotations.items():
        for entry in annotation['digests']:
            media_type = get_manifest_media_type(entry['version'])
            if not self.valid_media_type(media_type):
                continue
            host = registry_hostname(entry['registry'])
            by_registry.setdefault(host, {}).setdefault(platform, []).append(entry)

    sources = {}
    for registry in self.registries:
        per_platform = by_registry.get(registry_hostname(registry), {})
        if set(per_platform) != expected_platforms:
            raise RuntimeError("Missing platforms for registry {}: found {}, expected {}"
                               .format(registry, sorted(per_platform),
                                       sorted(expected_platforms)))
        sources[registry] = {
            platform: min(entries, key=lambda e: e['repository'])
            for platform, entries in per_platform.items()
        }
    return sources
def get_buildroot(self, worker_metadatas):
    """
    Build the buildroot entry of the metadata.

    :return: list, containing dicts of partial metadata
    """
    buildroot = koji_get_buildroot(build_id=self.build_id, tasker=self.tasker,
                                   osbs=self.osbs, rpms=False)
    # Namespace the id by architecture so entries from different workers don't clash.
    buildroot['id'] = '{}-{}'.format(buildroot['container']['arch'], buildroot['id'])

    registry = self.workflow.push_conf.docker_registries[0]
    build_name = get_unique_images(self.workflow)[0].to_str()
    manifest_digest = registry.digests[build_name]
    media_type = get_manifest_media_type(get_manifest_media_version(manifest_digest))
    buildroot['extra']['osbs']['koji'] = {
        'build_name': build_name,
        'builder_image_id': {media_type: manifest_digest.default}
    }
    return [buildroot]
def _get_image_with_digest(self, image: ImageName) -> Optional[ImageName]:
    """Return *image* pinned to a previously stored digest, or None.

    :param image: ImageName to look up in parent_images_digests
    :return: ImageName with a digest reference, or None when no digest is known
    """
    image_str = image.to_str()
    try:
        metadata = self.workflow.data.parent_images_digests[image_str]
    except KeyError:
        return None

    # Prefer the manifest list digest over the single v2 manifest digest.
    raw_digest = (metadata.get(get_manifest_media_type('v2_list'))
                  or metadata.get(get_manifest_media_type('v2')))
    if not raw_digest:
        return None

    hex_part = raw_digest.split(':', 1)[1]
    pinned = '{}@sha256:{}'.format(image.to_str(tag=False), hex_part)
    return ImageName.parse(pinned)
def manifest_list_entries_match(self, image, build_id):
    """Check whether manifest list entries are in koji.

    Compares the digest in each manifest list entry with the koji build
    archive for the entry's architecture. Returns True if they all match.

    :param image: ImageName, image to inspect
    :param build_id: int, koji build ID for the image
    :return: bool, True if the manifest list content refers to the koji build archives
    """
    if not image.registry:
        self.log.warning('Could not fetch manifest list for %s: missing registry ref', image)
        return False
    v2_type = get_manifest_media_type('v2')
    insecure = self._source_registry.get('insecure', False)
    dockercfg_path = self._source_registry.get('secret')
    manifest_list_response = get_manifest_list(image, image.registry, insecure=insecure,
                                               dockercfg_path=dockercfg_path)
    if not manifest_list_response:
        self.log.warning('Could not fetch manifest list for %s', image)
        return False
    # arch -> v2 digest, as reported by the registry's manifest list
    manifest_list_data = {}
    manifest_list = json.loads(manifest_list_response.content)
    for manifest in manifest_list['manifests']:
        # Any non-v2 entry means we cannot compare against koji archives.
        if manifest['mediaType'] != v2_type:
            self.log.warning('Unexpected media type in manifest list: %s', manifest)
            return False
        arch = manifest['platform']['architecture']
        v2_digest = manifest['digest']
        manifest_list_data[arch] = v2_digest
    # arch -> v2 digest, as recorded in the koji build's image archives
    archives = self.koji_session.listArchives(build_id)
    koji_archives_data = {}
    for archive in (a for a in archives if a['btype'] == KOJI_BTYPE_IMAGE):
        arch = archive['extra']['docker']['config']['architecture']
        v2_digest = archive['extra']['docker']['digests'][v2_type]
        koji_archives_data[arch] = v2_digest
    if koji_archives_data == manifest_list_data:
        self.log.info('Deeper manifest list check verified v2 manifest references match')
        return True
    self.log.warning('Manifest list refs "%s" do not match koji archive refs "%s"',
                     manifest_list_data, koji_archives_data)
    return False
def get_repositories_and_digests(self):
    """
    Returns a map of images to their repositories and a map of media types to each digest

    it creates a map of images to digests, which is need to create the image->repository
    map and uses the same loop structure as media_types->digest, but the image->digest
    map isn't needed after we have the image->repository map and can be discarded.
    """
    digests = {}  # image -> digests
    typed_digests = {}  # media_type -> digests
    for registry in self.workflow.push_conf.docker_registries:
        for image in self.workflow.tag_conf.images:
            image_str = image.to_str()
            if image_str in registry.digests:
                image_digests = registry.digests[image_str]
                if self.report_multiple_digests:
                    # report both v1 and v2 digests when available
                    digest_list = [digest for digest in (image_digests.v1, image_digests.v2)
                                   if digest]
                else:
                    digest_list = [self.select_digest(image_digests)]
                digests[image.to_str(registry=False)] = digest_list
                for digest_version in image_digests.content_type:
                    if digest_version not in image_digests:
                        continue
                    digest_type = get_manifest_media_type(digest_version)
                    typed_digests[digest_type] = image_digests[digest_version]
    if self.workflow.push_conf.pulp_registries:
        # If pulp was used, only report pulp images
        registries = self.workflow.push_conf.pulp_registries
    else:
        # Otherwise report all the images we pushed
        registries = self.workflow.push_conf.all_registries
    repositories = []
    for registry in registries:
        image = self.pullspec_image.copy()
        image.registry = registry.uri
        pullspec = image.to_str()
        repositories.append(pullspec)
        # also add a digest pullspec for every known digest of this image
        digest_list = digests.get(image.to_str(registry=False), ())
        for digest in digest_list:
            digest_pullspec = image.to_str(tag=False) + "@" + digest
            repositories.append(digest_pullspec)
    return repositories, typed_digests
def get_digests_with_types(self):
    """Collect pushed digests for the first unique image, keyed by media type.

    :return: dict, manifest media type -> digest
    """
    image = self.workflow.tag_conf.unique_images[0]
    tag = image.to_str(registry=False)
    merged = {}
    for registry in self.workflow.push_conf.docker_registries:
        if tag in registry.digests:
            merged.update(registry.digests[tag])
    return {get_manifest_media_type(version): digest
            for version, digest in merged.items()}
def get_digests_with_types(self):
    """Collect pushed digests for the first unique image, keyed by media type.

    :return: dict, manifest media type -> digest
    """
    image = self.workflow.tag_conf.unique_images[0]
    tag = image.to_str(registry=False)

    collected = {}
    for registry in self.workflow.push_conf.docker_registries:
        registry_digests = registry.digests
        if tag in registry_digests:
            collected.update(registry_digests[tag])

    digests_with_types = {}
    for version, digest in collected.items():
        digests_with_types[get_manifest_media_type(version)] = digest
    return digests_with_types
def get_repositories_and_digests(self):
    """
    Returns a map of images to their repositories and a map of media types to each digest

    it creates a map of images to digests, which is need to create the image->repository
    map and uses the same loop structure as media_types->digest, but the image->digest
    map isn't needed after we have the image->repository map and can be discarded.
    """
    digests = {}  # image -> digests
    typed_digests = {}  # media_type -> digests
    for registry in self.workflow.push_conf.docker_registries:
        for image in self.workflow.tag_conf.images:
            image_str = image.to_str()
            if image_str in registry.digests:
                image_digests = registry.digests[image_str]
                # v1 digests are only reported when pulp is configured
                if self.report_multiple_digests and get_pulp(self.workflow, None):
                    digest_list = [digest for digest in (image_digests.v1,
                                                         image_digests.v2)
                                   if digest]
                else:
                    digest_list = [self.select_digest(image_digests)]
                digests[image.to_str(registry=False)] = digest_list
                for digest_version in image_digests.content_type:
                    if digest_version not in image_digests:
                        continue
                    # without pulp, v1 digests are omitted from the typed map too
                    if not get_pulp(self.workflow, None) and digest_version == 'v1':
                        continue
                    digest_type = get_manifest_media_type(digest_version)
                    typed_digests[digest_type] = image_digests[digest_version]
    if self.workflow.push_conf.pulp_registries:
        # If pulp was used, only report pulp images
        registries = self.workflow.push_conf.pulp_registries
    else:
        # Otherwise report all the images we pushed
        registries = self.workflow.push_conf.all_registries
    repositories = []
    for registry in registries:
        image = self.pullspec_image.copy()
        image.registry = registry.uri
        pullspec = image.to_str()
        repositories.append(pullspec)
        # also add a digest pullspec for every known digest of this image
        digest_list = digests.get(image.to_str(registry=False), ())
        for digest in digest_list:
            digest_pullspec = image.to_str(tag=False) + "@" + digest
            repositories.append(digest_pullspec)
    return repositories, typed_digests
def sort_annotations(self):
    """
    Return a map of maps to look up a single "worker digest" that has
    information about where to find an image manifest for each
    registry/architecture combination:

      worker_digest = <result>[registry][architecture]
    """
    all_annotations = self.workflow.build_result.annotations['worker-builds']
    all_platforms = set(all_annotations)
    if len(all_platforms) == 0:
        raise RuntimeError("No worker builds found, cannot group them")
    # registry hostname -> platform -> list of digest entries
    sorted_digests = {}
    for plat, annotation in all_annotations.items():
        for digest in annotation['digests']:
            hostname = registry_hostname(digest['registry'])
            media_type = get_manifest_media_type(digest['version'])
            # skip digests whose media type we are not grouping
            if media_type not in self.manifest_media_types:
                continue
            platforms = sorted_digests.setdefault(hostname, {})
            repos = platforms.setdefault(plat, [])
            repos.append(digest)
    sources = {}
    for registry in self.registries:
        registry_conf = self.registries[registry]
        # v1-only registries cannot host the grouped manifest
        if registry_conf.get('version') == 'v1':
            continue
        hostname = registry_hostname(registry)
        platforms = sorted_digests.get(hostname, {})
        if set(platforms) != all_platforms:
            raise RuntimeError("Missing platforms for registry {}: found {}, expected {}"
                               .format(registry, sorted(platforms), sorted(all_platforms)))
        selected_digests = {}
        for p, repos in platforms.items():
            # deterministically pick the entry with the smallest repository name
            selected_digests[p] = sorted(repos, key=lambda d: d['repository'])[0]
        sources[registry] = selected_digests
    return sources
def sort_annotations(self):
    """
    Return a map of maps to look up a single "worker digest" that has
    information about where to find an image manifest for each
    registry/architecture combination:

      worker_digest = <result>[registry][architecture]
    """
    all_annotations = self.workflow.build_result.annotations['worker-builds']
    expected_platforms = set(all_annotations)
    if not expected_platforms:
        raise RuntimeError("No worker builds found, cannot group them")

    # registry hostname -> platform -> list of digest entries
    by_registry = {}
    for platform, annotation in all_annotations.items():
        for entry in annotation['digests']:
            media_type = get_manifest_media_type(entry['version'])
            if media_type not in self.manifest_media_types:
                continue
            host = registry_hostname(entry['registry'])
            by_registry.setdefault(host, {}).setdefault(platform, []).append(entry)

    sources = {}
    for registry, registry_conf in self.registries.items():
        # v1-only registries cannot host the grouped manifest
        if registry_conf.get('version') == 'v1':
            continue
        per_platform = by_registry.get(registry_hostname(registry), {})
        if set(per_platform) != expected_platforms:
            raise RuntimeError("Missing platforms for registry {}: found {}, expected {}"
                               .format(registry, sorted(per_platform),
                                       sorted(expected_platforms)))
        sources[registry] = {
            platform: min(entries, key=lambda e: e['repository'])
            for platform, entries in per_platform.items()
        }
    return sources
def get_repositories_and_digests(workflow, pullspec_image):
    """Collect pushed repository pullspecs and a media-type -> digest map.

    Builds an intermediate image -> digest-list map purely to produce the
    image -> repository entries; only the repository list and the typed
    digest map are returned.

    :param workflow: build workflow holding push/tag configuration
    :param pullspec_image: ImageName used as the template for pullspecs
    :return: tuple, (list of repository pullspecs, dict media type -> digest)
    """
    per_image_digests = {}  # image (no registry) -> [digest]
    typed_digests = {}      # media type -> digest
    for docker_registry in workflow.push_conf.docker_registries:
        for tagged_image in workflow.tag_conf.images:
            key = tagged_image.to_str()
            if key not in docker_registry.digests:
                continue
            image_digests = docker_registry.digests[key]
            per_image_digests[tagged_image.to_str(registry=False)] = [
                select_digest(image_digests)
            ]
            for version in image_digests.content_type:
                # v1 digests are never reported here
                if version == 'v1' or version not in image_digests:
                    continue
                typed_digests[get_manifest_media_type(version)] = image_digests[version]

    repositories = []
    for push_registry in workflow.push_conf.all_registries:
        candidate = pullspec_image.copy()
        candidate.registry = push_registry.uri
        repositories.append(candidate.to_str())
        for digest in per_image_digests.get(candidate.to_str(registry=False), ()):
            repositories.append(candidate.to_str(tag=False) + "@" + digest)
    return repositories, typed_digests
def manifest_list_entries_match(self, image, build_id):
    """Check whether manifest list entries are in koji.

    Compares the digest in each manifest list entry with the koji build archive
    for the entry's architecture. Returns True if they all match.

    :param image: ImageName, image to inspect
    :param build_id: int, koji build ID for the image
    :return: bool, True if the manifest list content refers to the koji build archives
    """
    if not image.registry:
        self.log.warning('Could not fetch manifest list for %s: missing registry ref', image)
        return False
    v2_type = get_manifest_media_type('v2')
    reg_client = self._get_registry_client(image.registry)
    manifest_list_response = reg_client.get_manifest_list(image)
    if not manifest_list_response:
        self.log.warning('Could not fetch manifest list for %s', image)
        return False
    # arch -> v2 digest, as reported by the registry's manifest list
    manifest_list_data = {}
    manifest_list = json.loads(manifest_list_response.content)
    for manifest in manifest_list['manifests']:
        # Any non-v2 entry means we cannot compare against koji archives.
        if manifest['mediaType'] != v2_type:
            self.log.warning('Unexpected media type in manifest list: %s', manifest)
            return False
        arch = manifest['platform']['architecture']
        v2_digest = manifest['digest']
        manifest_list_data[arch] = v2_digest
    # arch -> v2 digest, as recorded in the koji build's image archives
    archives = self.koji_session.listArchives(build_id)
    koji_archives_data = {}
    for archive in (a for a in archives if a['btype'] == KOJI_BTYPE_IMAGE):
        arch = archive['extra']['docker']['config']['architecture']
        v2_digest = archive['extra']['docker']['digests'][v2_type]
        koji_archives_data[arch] = v2_digest
    # Only the architectures for the platforms we build need to be present.
    platform_to_arch_dict = self.workflow.conf.platform_to_goarch_mapping
    architectures = [platform_to_arch_dict[platform] for platform in self.platforms]
    missing_arches = [a for a in architectures if a not in koji_archives_data]
    if missing_arches:
        self.log.warning('Architectures "%s" are missing in Koji archives "%s"',
                         missing_arches, koji_archives_data)
        return False
    # manifest lists can be manually pushed to the registry to make sure a specific tag
    # (e.g., latest) is available for all platforms.
    # In such cases these manifest lists may include images from different koji builds.
    # We only want to check the digests for the images built in the current parent koji build
    err_msg = 'Manifest list digest %s differs from Koji archive digest %s for platform %s'
    unmatched_digests = False
    for arch in architectures:
        if manifest_list_data[arch] != koji_archives_data[arch]:
            unmatched_digests = True
            self.log.warning(err_msg, manifest_list_data[arch], koji_archives_data[arch],
                             arch)
    if unmatched_digests:
        return False
    self.log.info('Deeper manifest list check verified v2 manifest references match')
    return True
def set_group_manifest_info(self, extra):
    """Fill extra['image']['index'] (or the x86_64 docker output) with tag/digest info.

    When the group_manifests plugin produced a manifest list, record the index
    (tags, pull references, digests) in *extra*. Otherwise, patch the tags and
    repositories on the single x86_64 docker-image output metadata.

    :param extra: dict, koji build 'extra' metadata being assembled (mutated in place)
    """
    version_release = None
    primary_images = get_primary_images(self.workflow)
    if primary_images:
        version_release = primary_images[0].tag

    if is_scratch_build(self.workflow):
        # scratch builds have no primary images; fall back to whatever tags exist
        tags = [image.tag for image in self.workflow.data.tag_conf.images]
        version_release = tags[0]
    else:
        assert version_release is not None, 'Unable to find version-release image'
        tags = [image.tag for image in primary_images]

    floating_tags = [image.tag for image in get_floating_images(self.workflow)]
    unique_images = get_unique_images(self.workflow)
    unique_tags = [image.tag for image in unique_images]

    manifest_data = self.workflow.data.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY, {})
    if manifest_data and is_manifest_list(manifest_data.get("media_type")):
        manifest_digest = manifest_data["manifest_digest"]
        digest = manifest_digest.default

        build_image = unique_images[0]
        repo = ImageName.parse(build_image).to_str(registry=False, tag=False)
        # group_manifests added the registry, so this should be valid
        registry_uri = self.workflow.conf.registry['uri']

        digest_version = get_manifest_media_version(manifest_digest)
        media_type = get_manifest_media_type(digest_version)

        extra['image']['index'] = {
            'tags': tags,
            'floating_tags': floating_tags,
            'unique_tags': unique_tags,
            'pull': [
                f'{registry_uri}/{repo}@{digest}',
                f'{registry_uri}/{repo}:{version_release}',
            ],
            'digests': {media_type: digest},
        }
    # group_manifests returns None if didn't run, {} if group=False
    else:
        platform = "x86_64"
        _, instance = next(
            self._iter_build_metadata_outputs(platform, {"type": "docker-image"}),
            (None, None),
        )

        if instance:
            # koji_upload, running in the worker, doesn't have the full tags
            # so set them here
            instance['extra']['docker']['tags'] = tags
            instance['extra']['docker']['floating_tags'] = floating_tags
            instance['extra']['docker']['unique_tags'] = unique_tags
            repositories = []
            for pullspec in instance['extra']['docker']['repositories']:
                # tag-based pullspecs get normalized to the version-release tag
                if '@' not in pullspec:
                    image = ImageName.parse(pullspec)
                    image.tag = version_release
                    pullspec = image.to_str()
                repositories.append(pullspec)
            instance['extra']['docker']['repositories'] = repositories
            self.log.debug("reset tags to so that docker is %s", instance['extra']['docker'])
def set_group_manifest_info(self, extra, worker_metadatas):
    """Fill extra['image']['index'] (or the x86_64 worker output) with tag/digest info.

    When the group_manifests plugin produced a manifest list, record the index
    (tags, pull references, digests) in *extra*. Otherwise, patch the tags,
    repositories and digests on the x86_64 worker's docker-image outputs.

    :param extra: dict, koji build 'extra' metadata being assembled (mutated in place)
    :param worker_metadatas: dict, platform -> worker build metadata (mutated in place)
    """
    version_release = None
    primary_images = get_primary_images(self.workflow)
    floating_images = get_floating_images(self.workflow)
    unique_images = get_unique_images(self.workflow)
    if primary_images:
        version_release = primary_images[0].tag

    if is_scratch_build(self.workflow):
        # scratch builds have no primary images; fall back to whatever tags exist
        tags = [image.tag for image in self.workflow.tag_conf.images]
        version_release = tags[0]
    else:
        assert version_release is not None, 'Unable to find version-release image'
        tags = [image.tag for image in primary_images]

    floating_tags = [image.tag for image in floating_images]
    unique_tags = [image.tag for image in unique_images]

    manifest_data = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY, {})
    if manifest_data and is_manifest_list(manifest_data.get("media_type")):
        manifest_digest = manifest_data.get("manifest_digest")
        index = {}
        index['tags'] = tags
        index['floating_tags'] = floating_tags
        index['unique_tags'] = unique_tags
        build_image = get_unique_images(self.workflow)[0]
        repo = ImageName.parse(build_image).to_str(registry=False, tag=False)
        # group_manifests added the registry, so this should be valid
        registries = self.workflow.push_conf.all_registries
        digest_version = get_manifest_media_version(manifest_digest)
        digest = manifest_digest.default
        # NOTE(review): only the first registry is used — the loop breaks after
        # one iteration; presumably all registries host the same content.
        for registry in registries:
            pullspec = "{0}/{1}@{2}".format(registry.uri, repo, digest)
            index['pull'] = [pullspec]
            pullspec = "{0}/{1}:{2}".format(registry.uri, repo, version_release)
            index['pull'].append(pullspec)

            # Store each digest with according media type
            index['digests'] = {}
            media_type = get_manifest_media_type(digest_version)
            index['digests'][media_type] = digest
            break
        extra['image']['index'] = index
    # group_manifests returns None if didn't run, {} if group=False
    else:
        for platform in worker_metadatas:
            if platform == "x86_64":
                for instance in worker_metadatas[platform]['output']:
                    if instance['type'] == 'docker-image':
                        # koji_upload, running in the worker, doesn't have the full tags
                        # so set them here
                        instance['extra']['docker']['tags'] = tags
                        instance['extra']['docker']['floating_tags'] = floating_tags
                        instance['extra']['docker']['unique_tags'] = unique_tags
                        repositories = []
                        for pullspec in instance['extra']['docker']['repositories']:
                            # tag-based pullspecs get normalized to version-release
                            if '@' not in pullspec:
                                image = ImageName.parse(pullspec)
                                image.tag = version_release
                                pullspec = image.to_str()
                            repositories.append(pullspec)
                        instance['extra']['docker']['repositories'] = repositories
                        self.log.debug("reset tags to so that docker is %s",
                                       instance['extra']['docker'])
                        annotations = get_worker_build_info(self.workflow, platform).\
                            build.get_annotations()
                        digests = {}
                        if 'digests' in annotations:
                            digests = get_digests_map_from_annotations(
                                annotations['digests'])
                        instance['extra']['docker']['digests'] = digests
def get_output(workflow: DockerBuildWorkflow, buildroot_id: str, pullspec: ImageName,
               platform: str, source_build: bool = False):
    """
    Build the 'output' section of the metadata.

    :param buildroot_id: str, buildroot_id
    :param pullspec: ImageName
    :param platform: str, output platform
    :param source_build: bool, is source_build ?
    :param logs: list, of Output logs
    :return: tuple, list of Output instances, and extra Output file
    """
    def add_buildroot_id(output: Output) -> Output:
        # tag the output metadata with the buildroot that produced it
        output.metadata.update({'buildroot_id': buildroot_id})
        return output

    extra_output_file = None
    output_files: List[Output] = []
    image_id: str
    if source_build:
        manifest = workflow.data.koji_source_manifest
        image_id = manifest['config']['digest']
        # we are using digest from manifest, because we can't get diff_ids
        # unless we pull image, which would fail due because there are so many layers
        layer_sizes = [{'digest': layer['digest'], 'size': layer['size']}
                       for layer in manifest['layers']]
        # source containers report the build host's architecture
        platform = os.uname()[4]
    else:
        imageutil = workflow.imageutil
        image_id = imageutil.get_inspect_for_image(pullspec, platform=platform)['Id']
        inspect = imageutil.base_image_inspect(platform)
        parent_id = inspect['Id'] if inspect else None
        image_archive = str(workflow.build_dir.platform_dir(platform).exported_squashed_image)
        layer_sizes = imageutil.get_uncompressed_image_layer_sizes(image_archive)

    digests = get_manifest_digests(pullspec, workflow.conf.registry['uri'],
                                   workflow.conf.registry['insecure'],
                                   workflow.conf.registry.get('secret', None))
    # prefer the v2 schema 2 manifest; fall back to OCI
    if digests.v2:
        config_manifest_digest = digests.v2
        config_manifest_type = 'v2'
    else:
        config_manifest_digest = digests.oci
        config_manifest_type = 'oci'
    config = get_config_from_registry(pullspec, workflow.conf.registry['uri'],
                                      config_manifest_digest,
                                      workflow.conf.registry['insecure'],
                                      workflow.conf.registry.get('secret', None),
                                      config_manifest_type)
    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    digest_pullspec = f"{pullspec.to_str(tag=False)}@{select_digest(digests)}"
    repositories = [pullspec.to_str(), digest_pullspec]
    # v1 digests are excluded from the typed digest map
    typed_digests = {
        get_manifest_media_type(version): digest
        for version, digest in digests.items()
        if version != "v1"
    }
    tag_conf = workflow.data.tag_conf
    if source_build:
        tags = sorted(set(image.tag for image in tag_conf.images))
    else:
        tags = sorted(image.tag
                      for image in tag_conf.get_unique_images_with_platform(platform))

    # since we are storing oci image as v2s2 all images now have 'docker-archive' type
    metadata, output = get_image_output(IMAGE_TYPE_DOCKER_ARCHIVE, image_id, platform,
                                        pullspec)

    metadata.update({
        'arch': platform,
        'type': 'docker-image',
        'components': [],
        'extra': {
            'image': {
                'arch': platform,
            },
            'docker': {
                'id': image_id,
                'repositories': repositories,
                'layer_sizes': layer_sizes,
                'tags': tags,
                'config': config,
                'digests': typed_digests,
            },
        },
    })
    if not config:
        del metadata['extra']['docker']['config']
    if not source_build:
        metadata['components'] = get_image_components(workflow.data, platform)
        if parent_id is not None:
            metadata['extra']['docker']['parent_id'] = parent_id

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)

    if source_build:
        # NOTE(review): source builds append the metadata dict while regular
        # builds append the Output wrapper — confirm the consumer expects this.
        output_files.append(metadata)
        extra_output_file = output
    else:
        output_files.append(image)

    return output_files, extra_output_file
import pytest

# Docker client calls are mocked during tests when MOCK is set.
if MOCK:
    from tests.docker_mock import mock_docker

# Koji hub and build fixtures shared across the tests below.
KOJI_HUB = 'http://koji.com/hub'
KOJI_BUILD_ID = 123456789
KOJI_BUILD_NVR = 'base-image-1.0-99'
KOJI_STATE_COMPLETE = koji.BUILD_STATES['COMPLETE']
V2_LIST = get_manifest_media_type('v2_list')
# 'extra' metadata shaped like a koji build with a manifest list digest
KOJI_EXTRA = {'image': {'index': {'digests': {V2_LIST: 'stubDigest'}}}}

KOJI_STATE_DELETED = koji.BUILD_STATES['DELETED']

KOJI_BUILD = {'nvr': KOJI_BUILD_NVR, 'id': KOJI_BUILD_ID, 'state': KOJI_STATE_COMPLETE,
              'extra': KOJI_EXTRA}

DELETED_KOJI_BUILD = {'nvr': KOJI_BUILD_NVR, 'id': KOJI_BUILD_ID,
                      'state': KOJI_STATE_DELETED}

# Labels expected on the base image under test (component/version/release
# match KOJI_BUILD_NVR).
BASE_IMAGE_LABELS = {
    'com.redhat.component': 'base-image',
    'version': '1.0',
    'release': '99',
}
def set_group_manifest_info(self, extra, worker_metadatas):
    """Record manifest-grouping results in the koji metadata.

    If the group_manifests plugin produced digests, build an
    ``extra['image']['index']`` entry (tags, pull specs, per-media-type
    digests).  Otherwise fall back to patching the x86_64 worker's
    docker-image output metadata in place (tags, repositories, digests).

    :param extra: dict, koji build metadata 'extra' section — mutated in place
    :param worker_metadatas: dict, per-platform worker metadata — mutated in place
    """
    version_release = None
    primary_images = get_primary_images(self.workflow)
    for image in primary_images:
        if '-' in image.tag:  # {version}-{release} only, and only one instance
            version_release = image.tag
            break

    assert version_release is not None, 'Unable to find version-release image'
    tags = [image.tag for image in primary_images]

    manifest_list_digests = self.workflow.postbuild_results.get(
        PLUGIN_GROUP_MANIFESTS_KEY)
    if manifest_list_digests:
        index = {}
        index['tags'] = tags
        repositories = self.workflow.build_result.annotations[
            'repositories']['unique']
        repo = ImageName.parse(repositories[0]).to_str(registry=False, tag=False)
        # group_manifests added the registry, so this should be valid
        registries = self.workflow.push_conf.pulp_registries
        if not registries:
            registries = self.workflow.push_conf.all_registries
        for registry in registries:
            manifest_list_digest = manifest_list_digests[repo]
            # pull by digest first, then by the version-release tag
            pullspec = "{0}/{1}@{2}".format(registry.uri, repo,
                                            manifest_list_digest.default)
            index['pull'] = [pullspec]
            pullspec = "{0}/{1}:{2}".format(registry.uri, repo,
                                            version_release)
            index['pull'].append(pullspec)

            # Store each digest with according media type
            index['digests'] = {}
            for version, digest in manifest_list_digest.items():
                if digest:
                    media_type = get_manifest_media_type(version)
                    index['digests'][media_type] = digest
            # only the first registry is used for the index entry
            break
        extra['image']['index'] = index
    # group_manifests returns None if didn't run, {} if group=False
    else:
        for platform in worker_metadatas:
            if platform == "x86_64":
                for instance in worker_metadatas[platform]['output']:
                    if instance['type'] == 'docker-image':
                        # koji_upload, running in the worker, doesn't have the full tags
                        # so set them here
                        instance['extra']['docker']['tags'] = tags
                        repositories = []
                        for pullspec in instance['extra']['docker'][
                                'repositories']:
                            # retag tag-based pullspecs; leave @digest ones alone
                            if '@' not in pullspec:
                                image = ImageName.parse(pullspec)
                                image.tag = version_release
                                pullspec = image.to_str()
                            repositories.append(pullspec)
                        instance['extra']['docker'][
                            'repositories'] = repositories
                        self.log.debug(
                            "reset tags to so that docker is %s",
                            instance['extra']['docker'])
                        annotations = get_worker_build_info(self.workflow, platform).\
                            build.get_annotations()
                        digests = {}
                        if 'digests' in annotations:
                            digests = get_digests_map_from_annotations(
                                annotations['digests'])
                        instance['extra']['docker'][
                            'digests'] = digests
def test_get_manifest_media_type(version, expected):
    """Each known version string maps to its expected manifest media type."""
    media_type = get_manifest_media_type(version)
    assert media_type == expected
def get_output(workflow: DockerBuildWorkflow, buildroot_id: str, pullspec: ImageName,
               platform: str, source_build: bool = False):
    """
    Build the 'output' section of the koji metadata.

    :param workflow: DockerBuildWorkflow, the active build workflow
    :param buildroot_id: str, buildroot_id to stamp onto every output
    :param pullspec: ImageName, image reference to inspect in the registry
    :param platform: str, output platform (ignored for source builds, which
                     use the local machine architecture instead)
    :param source_build: bool, is source_build ?
    :return: tuple, list of Output instances, and extra Output file
    """
    def add_buildroot_id(output: Output) -> Output:
        # every koji output must reference the buildroot it was produced in
        output.metadata.update({'buildroot_id': buildroot_id})
        return output

    extra_output_file = None
    output_files: List[Output] = []

    image_id: str
    if source_build:
        manifest = workflow.data.koji_source_manifest
        image_id = manifest['config']['digest']
        # we are using the digest from the manifest, because we can't get
        # diff_ids unless we pull the image, which would fail because there
        # are so many layers
        layer_sizes = [{
            'digest': layer['digest'],
            'size': layer['size']
        } for layer in manifest['layers']]
        platform = os.uname()[4]
    else:
        imageutil = workflow.imageutil
        image_id = imageutil.get_inspect_for_image(pullspec,
                                                   platform=platform)['Id']

        # parent_id stays None for FROM scratch images
        parent_id = None
        if not workflow.data.dockerfile_images.base_from_scratch:
            parent_id = imageutil.base_image_inspect(platform)['Id']

        image_archive = str(
            workflow.build_dir.platform_dir(platform).exported_squashed_image)
        layer_sizes = imageutil.get_uncompressed_image_layer_sizes(
            image_archive)

    digests = get_manifest_digests(pullspec, workflow.conf.registry['uri'],
                                   workflow.conf.registry['insecure'],
                                   workflow.conf.registry.get('secret', None))
    # prefer the v2 schema 2 digest; fall back to OCI
    if digests.v2:
        config_manifest_digest = digests.v2
        config_manifest_type = 'v2'
    else:
        config_manifest_digest = digests.oci
        config_manifest_type = 'oci'
    config = get_config_from_registry(
        pullspec, workflow.conf.registry['uri'], config_manifest_digest,
        workflow.conf.registry['insecure'],
        workflow.conf.registry.get('secret', None), config_manifest_type)

    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    digest_pullspec = f"{pullspec.to_str(tag=False)}@{select_digest(digests)}"
    repositories = [pullspec.to_str(), digest_pullspec]

    # v1 digests are legacy and deliberately excluded
    typed_digests = {
        get_manifest_media_type(version): digest
        for version, digest in digests.items()
        if version != "v1"
    }

    tag_conf = workflow.data.tag_conf
    if source_build:
        image_type = IMAGE_TYPE_DOCKER_ARCHIVE
        tags = sorted(set(image.tag for image in tag_conf.images))
    else:
        image_metadatas = workflow.data.postbuild_results[
            FetchDockerArchivePlugin.key]
        image_type = image_metadatas[platform]["type"]
        tags = sorted(
            image.tag
            for image in tag_conf.get_unique_images_with_platform(platform))

    metadata, output = get_image_output(image_type, image_id, platform, pullspec)

    metadata.update({
        'arch': platform,
        'type': 'docker-image',
        'components': [],
        'extra': {
            'image': {
                'arch': platform,
            },
            'docker': {
                'id': image_id,
                'repositories': repositories,
                'layer_sizes': layer_sizes,
                'tags': tags,
                'config': config,
                'digests': typed_digests,
            },
        },
    })

    if not config:
        del metadata['extra']['docker']['config']
    if not source_build:
        metadata['components'] = get_image_components(workflow, platform)
        if not workflow.data.dockerfile_images.base_from_scratch:
            metadata['extra']['docker']['parent_id'] = parent_id

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)

    # when doing regular build, worker already uploads image,
    # so orchestrator needs only metadata,
    # but source container build didn't upload that image yet,
    # so we want metadata, and the image to upload
    if source_build:
        output_files.append(metadata)
        extra_output_file = output
    else:
        output_files.append(image)

    if not source_build:
        # add operator manifests to output
        operator_manifests_path = (workflow.data.postbuild_results.get(
            PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY))
        if operator_manifests_path:
            manifests_metadata = get_output_metadata(operator_manifests_path,
                                                     OPERATOR_MANIFESTS_ARCHIVE)
            operator_manifests_output = Output(
                filename=operator_manifests_path, metadata=manifests_metadata)
            add_custom_type(operator_manifests_output,
                            KOJI_BTYPE_OPERATOR_MANIFESTS)
            operator_manifests = add_buildroot_id(operator_manifests_output)
            output_files.append(operator_manifests)

    return output_files, extra_output_file
def test_multiple_parent_images(self, workflow, koji_session, koji_mtype,
                                special_base, parent_tags, media_version, caplog):
    """Exercise the plugin with several parent images and mismatched digests.

    Each parent's local tag doubles as its "digest"; a tag of 'miss' therefore
    produces a digest mismatch against koji's 'stubDigest'.  When koji lacks
    the expected media type (koji_mtype False), a different error is logged.
    """
    parent_images = {
        ImageName.parse('somebuilder'): ImageName.parse('somebuilder:{}'
                                                        .format(parent_tags[0])),
        ImageName.parse('otherbuilder'): ImageName.parse('otherbuilder:{}'
                                                         .format(parent_tags[1])),
        ImageName.parse('base'): ImageName.parse('base:{}'.format(parent_tags[2])),
    }
    media_type = get_manifest_media_type(media_version)
    workflow.builder.parent_images_digests = {}
    for parent in parent_images:
        # use the tag itself as the stub digest value
        dgst = parent_images[parent].tag
        workflow.builder.parent_images_digests[parent.to_str()] = {media_type: dgst}
    if not koji_mtype:
        # make koji's record carry a media type the plugin will not accept
        media_type = get_manifest_media_type('v1')
    extra = {'image': {'index': {'digests': {media_type: 'stubDigest'}}}}
    koji_builds = dict(
        somebuilder=dict(nvr='somebuilder-1.0-1', id=42,
                         state=KOJI_STATE_COMPLETE, extra=extra),
        otherbuilder=dict(nvr='otherbuilder-2.0-1', id=43,
                          state=KOJI_STATE_COMPLETE, extra=extra),
        base=dict(nvr=KOJI_BUILD_NVR, id=KOJI_BUILD_ID,
                  state=KOJI_STATE_COMPLETE, extra=extra),
        unresolved=None,
    )
    image_inspects = {}
    koji_expects = {}

    # need to load up our mock objects with expected responses for the parents
    for img, build in koji_builds.items():
        if build is None:
            continue
        name, version, release = koji_builds[img]['nvr'].rsplit('-', 2)
        labels = {'com.redhat.component': name, 'version': version,
                  'release': release}
        image_inspects[img] = {INSPECT_CONFIG: dict(Labels=labels)}
        workflow.builder.set_parent_inspection_data(
            parent_images[ImageName.parse(img)], image_inspects[img])
        (koji_session.should_receive('getBuild')
            .with_args(koji_builds[img]['nvr'])
            .and_return(koji_builds[img]))
        koji_expects[ImageName.parse(img)] = build

    dockerfile_images = []
    for parent in parent_images:
        dockerfile_images.append(parent.to_str())
    if special_base == 'scratch':
        workflow.builder.set_image(ImageName.parse(SCRATCH_FROM))
        dockerfile_images.append('scratch')
    elif special_base == 'custom':
        workflow.builder.set_image(ImageName.parse('koji/image-build'))
        dockerfile_images.append('koji/image-build')
    else:
        workflow.builder.set_image(ImageName.parse('basetag'))
        workflow.builder.set_inspection_data(image_inspects['base'])
    workflow.builder.set_dockerfile_images(dockerfile_images)
    for parent, local in parent_images.items():
        workflow.builder.dockerfile_images[parent] = local

    expected = {
        BASE_IMAGE_KOJI_BUILD: koji_builds['base'],
        PARENT_IMAGES_KOJI_BUILDS: koji_expects,
    }
    if special_base:
        # scratch/custom bases have no koji build of their own
        del expected[BASE_IMAGE_KOJI_BUILD]

    if not koji_mtype:
        self.run_plugin_with_args(
            workflow, expect_result=expected
        )
        assert 'does not have manifest digest data for the expected media type' in caplog.text
    # NOTE(review): `or not koji_mtype` below is dead — the branch above
    # already handles every `not koji_mtype` case.
    elif 'miss' in parent_tags or not koji_mtype:
        self.run_plugin_with_args(
            workflow, expect_result=expected
        )
        errors = []
        error_msg = ('Manifest digest (miss) for parent image {}:latest does not match value '
                     'in its koji reference (stubDigest)')
        if parent_tags[0] == 'miss':
            errors.append(error_msg.format('somebuilder'))
        if parent_tags[1] == 'miss':
            errors.append(error_msg.format('otherbuilder'))
        if parent_tags[2] == 'miss':
            errors.append(error_msg.format('base'))
        assert 'This parent image MUST be rebuilt' in caplog.text
        for e in errors:
            assert e in caplog.text
    else:
        self.run_plugin_with_args(
            workflow, expect_result=expected
        )
def test_multiple_parent_images(self, workflow, koji_session, reactor_config_map,
                                special_base, parent_tags, media_version):
    """Exercise the plugin with several parent images and mismatched digests.

    Each parent's local tag doubles as its "digest"; a tag of 'miss' therefore
    produces a digest mismatch against koji's 'stubDigest' and the plugin is
    expected to fail.
    """
    parent_images = {
        ImageName.parse('somebuilder'): ImageName.parse('somebuilder:{}'
                                                        .format(parent_tags[0])),
        ImageName.parse('otherbuilder'): ImageName.parse('otherbuilder:{}'
                                                         .format(parent_tags[1])),
        ImageName.parse('base'): ImageName.parse('base:{}'.format(parent_tags[2])),
    }
    media_type = get_manifest_media_type(media_version)
    workflow.builder.parent_images_digests = {}
    for parent in parent_images:
        # use the tag itself as the stub digest value
        dgst = parent_images[parent].tag
        workflow.builder.parent_images_digests[parent.to_str()] = {media_type: dgst}
    extra = {'image': {'index': {'digests': {media_type: 'stubDigest'}}}}
    koji_builds = dict(
        somebuilder=dict(nvr='somebuilder-1.0-1', id=42,
                         state=KOJI_STATE_COMPLETE, extra=extra),
        otherbuilder=dict(nvr='otherbuilder-2.0-1', id=43,
                          state=KOJI_STATE_COMPLETE, extra=extra),
        base=dict(nvr=KOJI_BUILD_NVR, id=KOJI_BUILD_ID,
                  state=KOJI_STATE_COMPLETE, extra=extra),
        unresolved=None,
    )
    image_inspects = {}
    koji_expects = {}

    # need to load up our mock objects with expected responses for the parents
    for img, build in koji_builds.items():
        if build is None:
            continue
        name, version, release = koji_builds[img]['nvr'].rsplit('-', 2)
        labels = {'com.redhat.component': name, 'version': version,
                  'release': release}
        image_inspects[img] = {INSPECT_CONFIG: dict(Labels=labels)}
        (workflow.builder.tasker
            .should_receive('inspect_image')
            .with_args(parent_images[ImageName.parse(img)])
            .and_return(image_inspects[img]))
        (koji_session.should_receive('getBuild')
            .with_args(koji_builds[img]['nvr'])
            .and_return(koji_builds[img]))
        koji_expects[ImageName.parse(img)] = build

    if special_base == 'scratch':
        workflow.builder.set_base_image(SCRATCH_FROM)
    elif special_base == 'custom':
        workflow.builder.set_base_image('koji/image-build')
        parent_images[ImageName.parse('koji/image-build')] = None
    else:
        workflow.builder.set_base_image('basetag')
        workflow.builder.base_image_inspect.update(image_inspects['base'])
    workflow.builder.parent_images = parent_images

    expected = {
        BASE_IMAGE_KOJI_BUILD: koji_builds['base'],
        PARENT_IMAGES_KOJI_BUILDS: koji_expects,
    }
    if special_base:
        # scratch/custom bases have no koji build of their own
        del expected[BASE_IMAGE_KOJI_BUILD]

    if 'miss' in parent_tags:
        with pytest.raises(PluginFailedException) as exc:
            self.run_plugin_with_args(
                workflow, expect_result=expected,
                reactor_config_map=reactor_config_map
            )
        errors = []
        error_msg = ('Manifest digest (miss) for parent image {}:latest does not match value '
                     'in its koji reference (stubDigest). This parent image MUST be rebuilt')
        if parent_tags[0] == 'miss':
            errors.append(error_msg.format('somebuilder'))
        if parent_tags[1] == 'miss':
            errors.append(error_msg.format('otherbuilder'))
        if parent_tags[2] == 'miss':
            errors.append(error_msg.format('base'))
        assert 'This parent image MUST be rebuilt' in str(exc)
        for e in errors:
            assert e in str(exc)
    else:
        self.run_plugin_with_args(
            workflow, expect_result=expected,
            reactor_config_map=reactor_config_map
        )
import pytest

if MOCK:
    from tests.docker_mock import mock_docker

# Canned koji data used by the tests below: a fake hub URL and parent-image
# build records (complete / building / deleted) in getBuild response shape.
KOJI_HUB = 'http://koji.com/hub'
KOJI_BUILD_ID = 123456789
KOJI_BUILD_NVR = 'base-image-1.0-99'
KOJI_STATE_COMPLETE = koji.BUILD_STATES['COMPLETE']
KOJI_STATE_BUILDING = koji.BUILD_STATES['BUILDING']

# Manifest media types and the 'extra' payload a built image's koji record
# carries (index digests keyed by media type).
V2_LIST = get_manifest_media_type('v2_list')
V2 = get_manifest_media_type('v2')
KOJI_EXTRA = {'image': {'index': {'digests': {V2_LIST: 'stubDigest'}}}}

KOJI_STATE_DELETED = koji.BUILD_STATES['DELETED']

KOJI_BUILD_BUILDING = {'nvr': KOJI_BUILD_NVR, 'id': KOJI_BUILD_ID,
                       'state': KOJI_STATE_BUILDING, 'extra': KOJI_EXTRA}

KOJI_BUILD = {'nvr': KOJI_BUILD_NVR, 'id': KOJI_BUILD_ID,
              'state': KOJI_STATE_COMPLETE, 'extra': KOJI_EXTRA}

DELETED_KOJI_BUILD = {'nvr': KOJI_BUILD_NVR, 'id': KOJI_BUILD_ID,
                      'state': KOJI_STATE_DELETED}

# Labels the base image is expected to carry (matching KOJI_BUILD_NVR).
BASE_IMAGE_LABELS = {
    'com.redhat.component': 'base-image',
def set_group_manifest_info(self, extra, worker_metadatas):
    """Record manifest-grouping results in the koji metadata.

    If the group_manifests plugin produced digests, build an
    ``extra['image']['index']`` entry (tags, pull specs, per-media-type
    digests).  Otherwise fall back to patching the x86_64 worker's
    docker-image output metadata in place (tags, repositories, digests).

    :param extra: dict, koji build metadata 'extra' section — mutated in place
    :param worker_metadatas: dict, per-platform worker metadata — mutated in place
    """
    version_release = None
    primary_images = get_primary_images(self.workflow)
    for image in primary_images:
        if '-' in image.tag:  # {version}-{release} only, and only one instance
            version_release = image.tag
            break

    assert version_release is not None, 'Unable to find version-release image'
    tags = [image.tag for image in primary_images]

    manifest_list_digests = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY)
    if manifest_list_digests:
        index = {}
        index['tags'] = tags
        repositories = self.workflow.build_result.annotations['repositories']['unique']
        repo = ImageName.parse(repositories[0]).to_str(registry=False, tag=False)
        # group_manifests added the registry, so this should be valid
        registries = self.workflow.push_conf.pulp_registries
        if not registries:
            registries = self.workflow.push_conf.all_registries
        for registry in registries:
            manifest_list_digest = manifest_list_digests[repo]
            # pull by digest first, then by the version-release tag
            pullspec = "{0}/{1}@{2}".format(registry.uri, repo,
                                            manifest_list_digest.default)
            index['pull'] = [pullspec]
            pullspec = "{0}/{1}:{2}".format(registry.uri, repo, version_release)
            index['pull'].append(pullspec)

            # Store each digest with according media type
            index['digests'] = {}
            for version, digest in manifest_list_digest.items():
                if digest:
                    media_type = get_manifest_media_type(version)
                    index['digests'][media_type] = digest
            # only the first registry is used for the index entry
            break
        extra['image']['index'] = index
    # group_manifests returns None if didn't run, {} if group=False
    else:
        for platform in worker_metadatas:
            if platform == "x86_64":
                for instance in worker_metadatas[platform]['output']:
                    if instance['type'] == 'docker-image':
                        # koji_upload, running in the worker, doesn't have the full tags
                        # so set them here
                        instance['extra']['docker']['tags'] = tags
                        repositories = []
                        for pullspec in instance['extra']['docker']['repositories']:
                            # retag tag-based pullspecs; leave @digest ones alone
                            if '@' not in pullspec:
                                image = ImageName.parse(pullspec)
                                image.tag = version_release
                                pullspec = image.to_str()
                            repositories.append(pullspec)
                        instance['extra']['docker']['repositories'] = repositories
                        self.log.debug("reset tags to so that docker is %s",
                                       instance['extra']['docker'])
                        annotations = get_worker_build_info(self.workflow, platform).\
                            build.get_annotations()
                        digests = {}
                        if 'digests' in annotations:
                            digests = get_digests_map_from_annotations(annotations['digests'])
                        instance['extra']['docker']['digests'] = digests
def group_manifests_and_tag(self, session, worker_digests):
    """
    Creates a manifest list or OCI image index that groups the different manifests
    in worker_digests, then tags the result with all the configured tags found
    in workflow.tag_conf.

    :param session: registry session to query and push through
    :param worker_digests: dict, platform -> {'repository', 'digest', 'version'}
    :return: tuple, (repository of the unique image, ManifestDigest of the list)
    """
    self.log.info("%s: Creating manifest list", session.registry)

    # Extract information about the manifests that we will group - we get the
    # size and content type of the manifest by querying the registry
    manifests = []
    for platform, worker_image in worker_digests.items():
        repository = worker_image['repository']
        digest = worker_image['digest']
        media_type = get_manifest_media_type(worker_image['version'])
        # skip manifests whose media type we are not configured to group
        if media_type not in self.manifest_media_types:
            continue
        content, _, media_type, size = self.get_manifest(session, repository, digest)

        manifests.append({
            'content': content,
            'repository': repository,
            'digest': digest,
            'size': size,
            'media_type': media_type,
            'architecture': self.goarch.get(platform, platform),
        })

    list_type, list_json = self.build_list(manifests)
    self.log.info("%s: Created manifest, Content-Type=%s\n%s",
                  session.registry, list_type, list_json)

    # Now push the manifest list to the registry once per each tag
    self.log.info("%s: Tagging manifest list", session.registry)
    for image in self.workflow.tag_conf.images:
        target_repo = image.to_str(registry=False, tag=False)
        # We have to call store_manifest_in_repository directly for each
        # referenced manifest, since they potentially come from different repos
        for manifest in manifests:
            self.store_manifest_in_repository(session, manifest['content'],
                                              manifest['media_type'],
                                              manifest['repository'],
                                              target_repo,
                                              digest=manifest['digest'])
        self.store_manifest_in_repository(session, list_json, list_type,
                                          target_repo, target_repo,
                                          tag=image.tag)

    # Get the digest of the manifest list using one of the tags
    registry_image = self.workflow.tag_conf.unique_images[0]
    _, digest_str, _, _ = self.get_manifest(
        session, registry_image.to_str(registry=False, tag=False),
        registry_image.tag)

    if list_type == MEDIA_TYPE_OCI_V1_INDEX:
        digest = ManifestDigest(oci_index=digest_str)
    else:
        digest = ManifestDigest(v2_list=digest_str)

    # And store the manifest list in the push_conf
    push_conf_registry = self.workflow.push_conf.add_docker_registry(
        session.registry, insecure=session.insecure)
    for image in self.workflow.tag_conf.images:
        push_conf_registry.digests[image.tag] = digest

    self.log.info("%s: Manifest list digest is %s", session.registry, digest_str)
    return registry_image.get_repo(explicit_namespace=False), digest
def group_manifests_and_tag(self, session, worker_digests):
    """
    Creates a manifest list or OCI image index that groups the different manifests
    in worker_digests, then tags the result with all the configured tags found
    in workflow.tag_conf.

    :param session: registry session to query and push through
    :param worker_digests: dict, platform -> {'repository', 'digest', 'version'}
    :return: dict with 'manifest', 'media_type' and 'manifest_digest' keys
    """
    self.log.info("%s: Creating manifest list", session.registry)

    # Extract information about the manifests that we will group - we get the
    # size and content type of the manifest by querying the registry
    manifests = []
    for platform, worker_image in worker_digests.items():
        repository = worker_image['repository']
        digest = worker_image['digest']
        media_type = get_manifest_media_type(worker_image['version'])
        # skip manifests whose media type we are not configured to group
        if media_type not in self.manifest_util.manifest_media_types:
            continue
        content, _, media_type, size = self.manifest_util.get_manifest(
            session, repository, digest)

        manifests.append({
            'content': content,
            'repository': repository,
            'digest': digest,
            'size': size,
            'media_type': media_type,
            'architecture': self.goarch.get(platform, platform),
        })

    list_type, list_json = self.manifest_util.build_list(manifests)
    self.log.info("%s: Created manifest, Content-Type=%s\n%s",
                  session.registry, list_type, list_json)

    # Now push the manifest list to the registry once per each tag
    self.log.info("%s: Tagging manifest list", session.registry)
    for image in self.non_floating_images:
        target_repo = image.to_str(registry=False, tag=False)
        # We have to call store_manifest_in_repository directly for each
        # referenced manifest, since they potentially come from different repos
        for manifest in manifests:
            self.manifest_util.store_manifest_in_repository(
                session, manifest['content'], manifest['media_type'],
                manifest['repository'], target_repo, ref=manifest['digest'])
        self.manifest_util.store_manifest_in_repository(session, list_json,
                                                        list_type, target_repo,
                                                        target_repo,
                                                        ref=image.tag)

    # Get the digest of the manifest list using one of the tags
    registry_image = get_unique_images(self.workflow)[0]
    _, digest_str, _, _ = self.manifest_util.get_manifest(
        session, registry_image.to_str(registry=False, tag=False),
        registry_image.tag)

    if list_type == MEDIA_TYPE_OCI_V1_INDEX:
        digest = ManifestDigest(oci_index=digest_str)
    else:
        digest = ManifestDigest(v2_list=digest_str)

    # And store the manifest list in the push_conf
    push_conf_registry = self.workflow.push_conf.add_docker_registry(
        session.registry, insecure=session.insecure)
    tags = []
    for image in self.non_floating_images:
        push_conf_registry.digests[image.tag] = digest
        tags.append(image.tag)

    self.log.info("%s: Manifest list digest is %s", session.registry, digest_str)
    self.log.debug("tags: %s digest: %s", tags, digest)
    return {
        'manifest': list_json,
        'media_type': list_type,
        'manifest_digest': digest
    }
def test_get_manifest_media_type_unknown():
    """An unrecognized version identifier must raise RuntimeError."""
    # No `assert` around the call: the return value is irrelevant here, and
    # an AssertionError from a falsy return would not satisfy
    # pytest.raises(RuntimeError), masking the real failure mode.
    with pytest.raises(RuntimeError):
        get_manifest_media_type('no_such_version')
def test_get_manifest_media_type(version, expected):
    """Each known version string maps to its expected manifest media type."""
    media_type = get_manifest_media_type(version)
    assert media_type == expected
def test_get_manifest_media_type_unknown():
    """An unrecognized version identifier must raise RuntimeError."""
    # No `assert` around the call: the return value is irrelevant here, and
    # an AssertionError from a falsy return would not satisfy
    # pytest.raises(RuntimeError), masking the real failure mode.
    with pytest.raises(RuntimeError):
        get_manifest_media_type('no_such_version')