def test_get_manifest_digests_connection_error(tmpdir):
    # Verify that the https->http fallback logic does nothing unexpected
    # when no connection can be made at all: the ConnectionError raised
    # by the mocked endpoint simply propagates to the caller.
    parsed = ImageName.parse('example.com/spam:latest')
    manifest_url = 'https://example.com/v2/spam/manifests/latest'
    responses.add(responses.GET, manifest_url, body=ConnectionError())

    with pytest.raises(ConnectionError):
        get_manifest_digests(image=parsed, registry='https://example.com')
def test_get_manifest_digests_connection_error(tmpdir):
    # Test that our code to handle falling back from https to http
    # doesn't do anything unexpected when a connection can't be
    # made at all.
    kwargs = {}
    kwargs['image'] = ImageName.parse('example.com/spam:latest')
    kwargs['registry'] = 'https://example.com'
    url = 'https://example.com/v2/spam/manifests/latest'
    # responses raises the body when it is an exception instance,
    # simulating a registry host that is completely unreachable.
    responses.add(responses.GET, url, body=ConnectionError())
    # The error must propagate to the caller rather than being
    # swallowed by any protocol-fallback handling.
    with pytest.raises(ConnectionError):
        get_manifest_digests(**kwargs)
def get_grouped_manifests(self):
    """Create a manifest list in each v2 registry and collect its digest.

    For every configured registry that is not v1-only, build a
    manifest-list spec from the per-platform worker build annotations,
    submit it via ``submit_manifest_list``, then query the registry for
    the resulting manifest-list digest and record it on the push conf.

    :return: list of manifest-list digest objects, one per registry
    :raises PluginFailedException: if a registry yields no manifest list digest
    """
    grouped_manifests = []
    for registry, registry_conf in self.registries.items():
        # v1 registries cannot host manifest lists
        if registry_conf.get('version') == 'v1':
            continue

        manifest_list_spec = {}
        manifest_list_spec['manifests'] = []
        all_annotations = self.workflow.build_result.annotations['worker-builds']
        for platform in all_annotations:
            worker_image = all_annotations[platform]['digests'][0]
            tag = worker_image['tag']
            repository = worker_image['repository']
            arch_entry = {
                'image': '{0}/{1}:{2}'.format(registry, repository, tag),
                'platform': {
                    'os': 'linux',
                    # map the build platform name to its golang arch name
                    'architecture': self.goarch.get(platform, platform)
                }
            }
            manifest_list_spec['manifests'].append(arch_entry)

        manifest_list_spec['tags'] = [image.tag
                                      for image in self.workflow.tag_conf.images]

        # use a unique image tag because manifest-tool can't accept a digest that
        # isn't in the repository yet
        registry_image = self.workflow.tag_conf.unique_images[0]
        registry_image.registry = registry
        manifest_list_spec['image'] = registry_image.to_str()

        self.log.info("Submitting manifest-list spec %s", manifest_list_spec)
        self.submit_manifest_list(registry, registry_conf, manifest_list_spec)

        insecure = registry_conf.get('insecure', False)
        secret_path = registry_conf.get('secret')
        self.log.debug('attempting get_manifest_digests from %s for %s',
                       registry, registry_image)
        manifest_list_digest = get_manifest_digests(registry_image,
                                                    registry=registry,
                                                    insecure=insecure,
                                                    dockercfg_path=secret_path,
                                                    versions=('v2_list',))
        if not manifest_list_digest.v2_list:
            # Fix: exception constructors do not apply printf-style
            # arguments; the message must be %-formatted up front,
            # otherwise the exception carries an unformatted tuple.
            raise PluginFailedException('no manifest list digest for %s' % registry)
        self.log.debug('Digest for registry %s is %s', registry,
                       manifest_list_digest.v2_list)

        push_conf_registry = self.workflow.push_conf.add_docker_registry(
            registry, insecure=insecure)
        tag = registry_image.to_str(registry=False)
        push_conf_registry.digests[tag] = manifest_list_digest
        grouped_manifests.append(manifest_list_digest)

    self.log.info("Manifest lists created and collected for all repositories")
    return grouped_manifests
def run(self):
    """Tag the built image and push it to every configured registry.

    Records manifest digests for each pushed image on the push conf.
    The config blob of the first image that exposes a v2 schema 2 (or
    OCI) manifest digest is fetched afterwards and stored as well.

    :return: list of ImageName, all images that were pushed
    :raises RuntimeError: if a configured image name already contains a registry
    """
    pushed = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)

    # First digest/type/image seen with a v2 or OCI manifest; its
    # config blob is fetched once all pushes are done.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None

    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = self.workflow.push_conf.add_docker_registry(
            registry, insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)

        for base_image in self.workflow.tag_conf.images:
            if base_image.registry:
                raise RuntimeError("Image name must not contain registry: %r"
                                   % base_image.registry)

            registry_image = base_image.copy()
            registry_image.registry = registry

            # Push via skopeo when required, otherwise via the docker tasker.
            if self.need_skopeo_push():
                self.push_with_skopeo(registry_image, insecure, docker_push_secret)
            else:
                self.tasker.tag_and_push_image(self.workflow.builder.image_id,
                                               registry_image,
                                               insecure=insecure,
                                               force=True,
                                               dockercfg=docker_push_secret)
            defer_removal(self.workflow, registry_image)
            pushed.append(registry_image)

            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            push_conf_registry.digests[registry_image.to_str(registry=False)] = digests

            if config_manifest_digest is None:
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                    config_registry_image = registry_image
                elif digests.oci:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                    config_registry_image = registry_image

    if config_manifest_digest:
        push_conf_registry.config = get_config_from_registry(
            config_registry_image, registry, config_manifest_digest,
            insecure, docker_push_secret, config_manifest_type)
    else:
        self.log.info("V2 schema 2 or OCI manifest is not available to get config from")

    self.log.info("All images were tagged and pushed")
    return pushed
def run(self):
    """Tag the built image and push it to every configured registry.

    Records manifest digests for each pushed image on the push conf;
    the config of the first image with a v2 schema 2 digest is fetched
    from its registry afterwards.

    :return: list of ImageName, all images that were pushed
    :raises RuntimeError: if a configured image name already contains a registry
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)

    first_v2_digest = None
    first_registry_image = None

    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry, insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)

        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r" %
                                   image.registry)

            registry_image = image.copy()
            registry_image.registry = registry
            # The return value (push logs) was previously bound to an
            # unused local `logs`; the dead assignment is removed.
            self.tasker.tag_and_push_image(self.workflow.builder.image_id,
                                           registry_image,
                                           insecure=insecure,
                                           force=True,
                                           dockercfg=docker_push_secret)
            pushed_images.append(registry_image)
            defer_removal(self.workflow, registry_image)

            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests

            # Remember the first image carrying a v2 schema 2 digest so
            # its config blob can be fetched after all pushes finish.
            if not first_v2_digest and digests.v2:
                first_v2_digest = digests.v2
                first_registry_image = registry_image

    if first_v2_digest:
        push_conf_registry.config = get_config_from_registry(
            first_registry_image, registry, first_v2_digest, insecure,
            docker_push_secret, 'v2')
    else:
        self.log.info("V2 schema 2 digest is not available")

    self.log.info("All images were tagged and pushed")
    return pushed_images
def pin_digest(self, image):
    """
    Replace image tag with manifest list digest

    :param image: ImageName
    :return: ImageName with the tag replaced by the v2_list digest
    """
    if image.tag.startswith("sha256:"):
        self.log.debug("%s looks like a digest, skipping query", image.tag)
        return image
    self.log.debug("Querying %s for manifest list digest", image.registry)
    digests = get_manifest_digests(image, image.registry, versions=("v2_list",))
    # Fix: use attribute access, consistent with every other call site
    # of get_manifest_digests in this file — the result exposes the
    # digest as the `v2_list` attribute; subscripting with a string key
    # fails (or raises KeyError when absent) depending on the
    # ManifestDigest implementation.
    return self._replace(image, tag=digests.v2_list)
def test_get_manifest_digests_missing(tmpdir, v1_digest, v2_digest):
    """Check digest retrieval when only one schema version is served.

    ``v1_digest`` / ``v2_digest`` flag which manifest schema versions
    the mocked registry is able to provide.
    """
    kwargs = {}
    image = ImageName.parse('example.com/spam:latest')
    kwargs['image'] = image
    kwargs['registry'] = 'https://example.com'
    url = 'https://example.com/v2/spam/manifests/latest'

    def request_callback(request):
        media_type = request.headers['Accept']
        media_type_prefix = media_type.split('+')[0]
        # If requested schema version is not available, attempt to
        # fallback to other version if possible to simulate how
        # a docker registry behaves
        if media_type.endswith('v2+json') and v2_digest:
            digest = 'v2-digest'
        elif media_type.endswith('v2+json') and v1_digest:
            digest = 'not-used'
            media_type_prefix = media_type_prefix.replace('v2', 'v1', 1)
        elif media_type.endswith('v1+json') and v1_digest:
            digest = 'v1-digest'
        elif media_type.endswith('v1+json') and v2_digest:
            digest = 'not-used'
            media_type_prefix = media_type_prefix.replace('v1', 'v2', 1)
        else:
            raise ValueError('Unexpected media type {}'.format(media_type))
        headers = {
            'Content-Type': '{}+jsonish'.format(media_type_prefix),
            'Docker-Content-Digest': digest
        }
        return (200, headers, '')

    responses.add_callback(responses.GET, url, callback=request_callback)

    actual_digests = get_manifest_digests(**kwargs)

    # A digest must be reported only for the version actually served
    # under the matching media type; otherwise it must be None.
    if v1_digest:
        assert actual_digests.v1 == 'v1-digest'
    else:
        assert actual_digests.v1 is None
    if v2_digest:
        assert actual_digests.v2 == 'v2-digest'
    else:
        assert actual_digests.v2 is None
def test_get_manifest_digests_missing(tmpdir, v1_digest, v2_digest):
    """Digest retrieval when the registry serves only one schema version."""
    image = ImageName.parse('example.com/spam:latest')
    url = 'https://example.com/v2/spam/manifests/latest'

    def request_callback(request):
        accept = request.headers['Accept']
        prefix = accept.split('+')[0]
        # Mirror real docker registry behaviour: when the requested
        # schema version is unavailable, fall back to the other one.
        if accept.endswith('v2+json') and v2_digest:
            digest = 'v2-digest'
        elif accept.endswith('v2+json') and v1_digest:
            digest = 'not-used'
            prefix = prefix.replace('v2', 'v1', 1)
        elif accept.endswith('v1+json') and v1_digest:
            digest = 'v1-digest'
        elif accept.endswith('v1+json') and v2_digest:
            digest = 'not-used'
            prefix = prefix.replace('v1', 'v2', 1)
        else:
            raise ValueError('Unexpected media type {}'.format(accept))
        return (200,
                {'Content-Type': '{}+jsonish'.format(prefix),
                 'Docker-Content-Digest': digest},
                '')

    responses.add_callback(responses.GET, url, callback=request_callback)

    actual_digests = get_manifest_digests(image=image,
                                          registry='https://example.com')

    # Only the version actually served may report a digest.
    expected_v1 = 'v1-digest' if v1_digest else None
    expected_v2 = 'v2-digest' if v2_digest else None
    assert actual_digests.v1 == expected_v1
    assert actual_digests.v2 == expected_v2
def get_digests(self):
    """Return a mapping of repository pullspecs to their manifest digests.

    Images for which no digests could be retrieved are omitted.
    """
    reg = self.workflow.conf.registry
    candidates = (
        (img.to_str(),
         get_manifest_digests(img, reg['uri'], reg['insecure'],
                              reg.get('secret', None)))
        for img in self.workflow.data.tag_conf.images
    )
    # repository -> digest; drop images whose lookup returned nothing
    return {repo: found for repo, found in candidates if found}
def run(self):
    """Tag the built image and push it to every configured registry.

    Records manifest digests for each pushed image on the push conf;
    the config of the first image with a v2 schema 2 digest is fetched
    from its registry afterwards.

    :return: list of ImageName, all images that were pushed
    :raises RuntimeError: if a configured image name already contains a registry
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)

    first_v2_digest = None
    first_registry_image = None

    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry, insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)

        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r" %
                                   image.registry)

            registry_image = image.copy()
            registry_image.registry = registry
            # The return value (push logs) was previously bound to an
            # unused local `logs`; the dead assignment is removed.
            self.tasker.tag_and_push_image(self.workflow.builder.image_id,
                                           registry_image,
                                           insecure=insecure,
                                           force=True,
                                           dockercfg=docker_push_secret)
            pushed_images.append(registry_image)
            defer_removal(self.workflow, registry_image)

            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests

            # Remember the first image carrying a v2 schema 2 digest so
            # its config blob can be fetched after all pushes finish.
            if not first_v2_digest and digests.v2:
                first_v2_digest = digests.v2
                first_registry_image = registry_image

    if first_v2_digest:
        push_conf_registry.config = get_config_from_registry(
            first_registry_image, registry, first_v2_digest, insecure,
            docker_push_secret, 'v2')
    else:
        self.log.info("V2 schema 2 digest is not available")

    self.log.info("All images were tagged and pushed")
    return pushed_images
def test_get_manifest_digests(tmpdir, image, registry, insecure, creds, versions, url):
    """Exercise get_manifest_digests across parametrized configurations.

    ``image``/``registry``/``insecure``/``creds``/``versions`` come from
    test parametrization; ``url`` is the manifest endpoint the mocked
    registry answers on.
    """
    kwargs = {}
    image = ImageName.parse(image)
    kwargs['image'] = image
    if creds:
        # Write a .dockercfg with the credentials so the code under test
        # sends an Authorization header.
        temp_dir = mkdtemp(dir=str(tmpdir))
        with open(os.path.join(temp_dir, '.dockercfg'), 'w+') as dockerconfig:
            dockerconfig.write(json.dumps({
                image.registry: {
                    'username': creds[0],
                    'password': creds[1]
                }
            }))
        kwargs['dockercfg_path'] = temp_dir
    kwargs['registry'] = registry
    if insecure is not None:
        kwargs['insecure'] = insecure
    if versions is not None:
        kwargs['versions'] = versions

    def request_callback(request):
        # Credentials must be forwarded when both username and
        # password are configured.
        if creds and creds[0] and creds[1]:
            assert request.headers['Authorization']
        media_type = request.headers['Accept']
        if media_type.endswith('v2+json'):
            digest = 'v2-digest'
        elif media_type.endswith('v1+json'):
            digest = 'v1-digest'
        else:
            raise ValueError('Unexpected media type {}'.format(media_type))
        media_type_prefix = media_type.split('+')[0]
        headers = {
            'Content-Type': '{}+jsonish'.format(media_type_prefix),
            'Docker-Content-Digest': digest
        }
        return (200, headers, '')

    responses.add_callback(responses.GET, url, callback=request_callback)

    expected_versions = versions
    if versions is None:
        # Test default versions value
        expected_versions = ('v1', 'v2')
    expected_result = dict(
        (version, '{}-digest'.format(version))
        for version in expected_versions)

    if expected_versions:
        actual_digests = get_manifest_digests(**kwargs)
        assert actual_digests.v1 == expected_result.get('v1')
        assert actual_digests.v2 == expected_result.get('v2')
    else:
        # An empty versions tuple leaves nothing to request
        with pytest.raises(RuntimeError):
            get_manifest_digests(**kwargs)
def run(self):
    """Tag the built image and push it to every configured registry.

    Pushes are retried (with exponential backoff) until a v2 schema 2
    or OCI manifest digest appears, when grouping or a v2s2 media type
    is expected. Digests are recorded on the push conf, and the config
    blob of the first image with a v2/OCI digest is fetched afterwards.

    :return: list of ImageName, all images that were pushed
    :raises RuntimeError: if a configured image name already contains a registry
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)

    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None

    # Whether any registry expects a v2 schema 2 manifest.  This is
    # invariant for the whole run, so compute it once up front.  The
    # previous code recomputed it inside the image loop using a loop
    # variable that shadowed — and clobbered — the outer `registry`,
    # which made the subsequent digest queries target the wrong
    # registry whenever more than one was configured.
    expect_v2s2 = False
    for registry_name in self.registries:
        media_types = self.registries[registry_name].get('expected_media_types', [])
        if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in media_types:
            expect_v2s2 = True

    # No retries needed unless we group manifests or expect v2s2.
    max_retries = DOCKER_PUSH_MAX_RETRIES
    if not (self.group or expect_v2s2):
        max_retries = 0

    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry, insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)

        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r" %
                                   image.registry)

            registry_image = image.copy()
            registry_image.registry = registry

            for retry in range(max_retries + 1):
                if self.need_skopeo_push():
                    self.push_with_skopeo(registry_image, insecure, docker_push_secret)
                else:
                    self.tasker.tag_and_push_image(self.workflow.builder.image_id,
                                                   registry_image,
                                                   insecure=insecure,
                                                   force=True,
                                                   dockercfg=docker_push_secret)

                digests = get_manifest_digests(registry_image, registry, insecure,
                                               docker_push_secret)

                if (not (digests.v2 or digests.oci) and (retry < max_retries)):
                    # Exponential backoff before re-pushing
                    sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2 ** retry)
                    self.log.info("Retrying push because V2 schema 2 or "
                                  "OCI manifest not found in %is", sleep_time)
                    time.sleep(sleep_time)
                else:
                    if not self.need_skopeo_push():
                        defer_removal(self.workflow, registry_image)
                    break

            pushed_images.append(registry_image)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests

            # Remember the first image with a v2/OCI digest so its
            # config blob can be fetched after all pushes finish.
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image

    if config_manifest_digest:
        push_conf_registry.config = get_config_from_registry(
            config_registry_image, registry, config_manifest_digest,
            insecure, docker_push_secret, config_manifest_type)
    else:
        self.log.info("V2 schema 2 or OCI manifest is not available to get config from")

    self.log.info("All images were tagged and pushed")
    return pushed_images
def test_get_manifest_digests(tmpdir, image, registry, insecure, creds, versions, url):
    """Parametrized happy-path checks for get_manifest_digests."""
    parsed_image = ImageName.parse(image)
    call_args = {'image': parsed_image, 'registry': registry}

    if creds:
        # Provide a .dockercfg so an Authorization header is sent.
        cfg_dir = mkdtemp(dir=str(tmpdir))
        cfg_content = {parsed_image.registry: {'username': creds[0],
                                               'password': creds[1]}}
        with open(os.path.join(cfg_dir, '.dockercfg'), 'w+') as cfg_file:
            cfg_file.write(json.dumps(cfg_content))
        call_args['dockercfg_path'] = cfg_dir
    if insecure is not None:
        call_args['insecure'] = insecure
    if versions is not None:
        call_args['versions'] = versions

    def request_callback(request):
        if creds and creds[0] and creds[1]:
            assert request.headers['Authorization']
        accept = request.headers['Accept']
        if accept.endswith('v2+json'):
            digest = 'v2-digest'
        elif accept.endswith('v1+json'):
            digest = 'v1-digest'
        else:
            raise ValueError('Unexpected media type {}'.format(accept))
        prefix = accept.split('+')[0]
        return (200,
                {'Content-Type': '{}+jsonish'.format(prefix),
                 'Docker-Content-Digest': digest},
                '')

    responses.add_callback(responses.GET, url, callback=request_callback)

    # versions=None exercises the default of ('v1', 'v2')
    expected_versions = ('v1', 'v2') if versions is None else versions
    expected_result = {v: '{}-digest'.format(v) for v in expected_versions}

    if not expected_versions:
        # An empty versions tuple leaves nothing to request
        with pytest.raises(RuntimeError):
            get_manifest_digests(**call_args)
    else:
        actual_digests = get_manifest_digests(**call_args)
        assert actual_digests.v1 == expected_result.get('v1')
        assert actual_digests.v2 == expected_result.get('v2')
def get_output(workflow: DockerBuildWorkflow, buildroot_id: str, pullspec: ImageName,
               platform: str, source_build: bool = False):
    """
    Build the 'output' section of the metadata.
    :param buildroot_id: str, buildroot_id
    :param pullspec: ImageName
    :param platform: str, output platform
    :param source_build: bool, is source_build ?
    :param logs: list, of Output logs
    :return: tuple, list of Output instances, and extra Output file
    """
    def add_buildroot_id(output: Output) -> Output:
        # Tag the output with the buildroot that produced it
        output.metadata.update({'buildroot_id': buildroot_id})
        return output

    extra_output_file = None
    output_files: List[Output] = []
    image_id: str

    if source_build:
        manifest = workflow.data.koji_source_manifest
        image_id = manifest['config']['digest']
        # we are using digest from manifest, because we can't get diff_ids
        # unless we pull image, which would fail because there are so many layers
        layer_sizes = [{'digest': layer['digest'], 'size': layer['size']}
                       for layer in manifest['layers']]
        # source builds report the host architecture as the platform
        platform = os.uname()[4]
    else:
        imageutil = workflow.imageutil
        image_id = imageutil.get_inspect_for_image(pullspec, platform=platform)['Id']
        inspect = imageutil.base_image_inspect(platform)
        # inspect may be falsy (presumably for base-from-scratch builds
        # — TODO confirm), in which case there is no parent image
        parent_id = inspect['Id'] if inspect else None
        image_archive = str(
            workflow.build_dir.platform_dir(platform).exported_squashed_image)
        layer_sizes = imageutil.get_uncompressed_image_layer_sizes(image_archive)

    digests = get_manifest_digests(pullspec, workflow.conf.registry['uri'],
                                   workflow.conf.registry['insecure'],
                                   workflow.conf.registry.get('secret', None))
    # Prefer the v2 schema 2 digest; fall back to the OCI digest
    if digests.v2:
        config_manifest_digest = digests.v2
        config_manifest_type = 'v2'
    else:
        config_manifest_digest = digests.oci
        config_manifest_type = 'oci'

    config = get_config_from_registry(pullspec, workflow.conf.registry['uri'],
                                      config_manifest_digest,
                                      workflow.conf.registry['insecure'],
                                      workflow.conf.registry.get('secret', None),
                                      config_manifest_type)

    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    digest_pullspec = f"{pullspec.to_str(tag=False)}@{select_digest(digests)}"
    repositories = [pullspec.to_str(), digest_pullspec]

    # v1 digests are excluded from the typed digest map
    typed_digests = {
        get_manifest_media_type(version): digest
        for version, digest in digests.items()
        if version != "v1"
    }

    tag_conf = workflow.data.tag_conf
    if source_build:
        tags = sorted(set(image.tag for image in tag_conf.images))
    else:
        tags = sorted(image.tag
                      for image in tag_conf.get_unique_images_with_platform(platform))

    # since we are storing oci image as v2s2 all images now have 'docker-archive' type
    metadata, output = get_image_output(IMAGE_TYPE_DOCKER_ARCHIVE, image_id,
                                        platform, pullspec)

    metadata.update({
        'arch': platform,
        'type': 'docker-image',
        'components': [],
        'extra': {
            'image': {
                'arch': platform,
            },
            'docker': {
                'id': image_id,
                'repositories': repositories,
                'layer_sizes': layer_sizes,
                'tags': tags,
                'config': config,
                'digests': typed_digests,
            },
        },
    })

    if not config:
        del metadata['extra']['docker']['config']
    if not source_build:
        metadata['components'] = get_image_components(workflow.data, platform)
        if parent_id is not None:
            metadata['extra']['docker']['parent_id'] = parent_id

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)

    if source_build:
        output_files.append(metadata)
        extra_output_file = output
    else:
        output_files.append(image)

    return output_files, extra_output_file
def run(self):
    """Tag the built image and push it to every configured registry.

    Records manifest digests for each pushed image on the push conf.
    The config blob of the first image that exposes a v2 schema 2 (or
    OCI) manifest digest is fetched afterwards and stored as well.

    :return: list of ImageName, all images that were pushed
    :raises RuntimeError: if a configured image name already contains a registry
    """
    pushed = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)

    # First digest/type/image seen with a v2 or OCI manifest; its
    # config blob is fetched once all pushes are done.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None

    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = self.workflow.push_conf.add_docker_registry(
            registry, insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)

        for base_image in self.workflow.tag_conf.images:
            if base_image.registry:
                raise RuntimeError("Image name must not contain registry: %r"
                                   % base_image.registry)

            registry_image = base_image.copy()
            registry_image.registry = registry

            # Push via skopeo when required, otherwise via the docker tasker.
            if self.need_skopeo_push():
                self.push_with_skopeo(registry_image, insecure, docker_push_secret)
            else:
                self.tasker.tag_and_push_image(self.workflow.builder.image_id,
                                               registry_image,
                                               insecure=insecure,
                                               force=True,
                                               dockercfg=docker_push_secret)
            defer_removal(self.workflow, registry_image)
            pushed.append(registry_image)

            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            push_conf_registry.digests[registry_image.to_str(registry=False)] = digests

            if config_manifest_digest is None:
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                    config_registry_image = registry_image
                elif digests.oci:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                    config_registry_image = registry_image

    if config_manifest_digest:
        push_conf_registry.config = get_config_from_registry(
            config_registry_image, registry, config_manifest_digest,
            insecure, docker_push_secret, config_manifest_type)
    else:
        self.log.info("V2 schema 2 or OCI manifest is not available to get config from")

    self.log.info("All images were tagged and pushed")
    return pushed
def run(self):
    """Verify that the registries serve the expected manifest media types.

    For each configured registry, query the first unique image and
    record which manifest media types are actually available, then
    compare against the registry's ``expected_media_types``.

    :return: sorted list of media types found everywhere (or just the
             manifest-list type when only a manifest list is expected)
    :raises KeyError: if an expected media type is missing in any registry
    :raises ValueError: if no unique image has been set
    """
    # Only run if the build was successful
    if self.workflow.build_process_failed:
        self.log.info("Not running for failed build")
        return []

    # Work out the name of the image to pull
    if not self.workflow.tag_conf.unique_images:
        raise ValueError("no unique image set, impossible to verify media types")
    if self.workflow.push_conf.pulp_registries:
        self.log.info("pulp registry configure, verify_media_types should not run")
        return
    image = self.workflow.tag_conf.unique_images[0]

    registries = deepcopy(get_registries(self.workflow, {}))
    media_in_registry = {}
    expect_list_only = self.get_manifest_list_only_expectation()

    for registry_name, registry in registries.items():
        expected_media_types = set(registry.get('expected_media_types', []))
        media_types = set()

        if expect_list_only:
            # Only the manifest list matters in this mode
            expected_media_types = set([MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST])

        media_in_registry[registry_name] = {'expected': expected_media_types}

        pullspec = image.copy()
        pullspec.registry = registry_name
        insecure = registry.get('insecure', False)
        secret = registry.get('secret', None)

        # require_digest=False: absence of a digest means the media
        # type is simply not available, not an error
        digests = get_manifest_digests(pullspec, registry_name, insecure, secret,
                                       require_digest=False)
        if digests:
            if digests.v2_list:
                media_types.add(MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST)
            if digests.v2:
                media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA2)
            if digests.v1:
                media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA1)
            if digests.oci:
                media_types.add(MEDIA_TYPE_OCI_V1)
            if digests.oci_index:
                media_types.add(MEDIA_TYPE_OCI_V1_INDEX)

        # v1 availability is probed separately
        if verify_v1_image(pullspec, registry_name, self.log, insecure, secret):
            media_types.add(MEDIA_TYPE_DOCKER_V1)

        media_in_registry[registry_name]['found'] = media_types

    should_raise = False
    all_found = set()
    for registry_name, manifests in media_in_registry.items():
        all_found.update(manifests['found'])
        if manifests['expected'] - manifests['found']:
            should_raise = True
            self.log.error("expected media types %s not in available media types %s,"
                           " for registry %s",
                           sorted(manifests['expected'] - manifests['found']),
                           sorted(manifests['found']),
                           registry_name)

    if should_raise:
        raise KeyError("expected media types were not found")

    if expect_list_only:
        return [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]

    return sorted(all_found)
def test_get_manifest_digests(tmpdir, image, registry, insecure, creds, versions,
                              require_digest, path):
    """Parametrized checks for get_manifest_digests.

    Covers credentials, insecure (https->http fallback), explicit
    version lists including v2_list, and the ``require_digest`` flag.
    """
    kwargs = {}
    image = ImageName.parse(image)
    kwargs['image'] = image

    if creds:
        # Write a .dockercfg with the credentials so the code under test
        # sends an Authorization header.
        temp_dir = mkdtemp(dir=str(tmpdir))
        with open(os.path.join(temp_dir, '.dockercfg'), 'w+') as dockerconfig:
            dockerconfig.write(json.dumps({
                registry: {
                    'username': creds[0],
                    'password': creds[1]
                }
            }))
        kwargs['dockercfg_path'] = temp_dir

    kwargs['registry'] = registry
    if insecure is not None:
        kwargs['insecure'] = insecure
    if versions is not None:
        kwargs['versions'] = versions
    kwargs['require_digest'] = require_digest

    def request_callback(request, all_headers=True):
        if creds and creds[0] and creds[1]:
            assert request.headers['Authorization']
        media_type = request.headers['Accept']
        if media_type.endswith('list.v2+json'):
            digest = 'v2_list-digest'
        elif media_type.endswith('v2+json'):
            digest = 'v2-digest'
        elif media_type.endswith('v1+json'):
            digest = 'v1-digest'
        else:
            raise ValueError('Unexpected media type {}'.format(media_type))
        media_type_prefix = media_type.split('+')[0]
        if all_headers:
            headers = {
                'Content-Type': '{}+jsonish'.format(media_type_prefix),
            }
            # Manifest lists deliberately omit the digest header
            if not media_type.endswith('list.v2+json'):
                headers['Docker-Content-Digest'] = digest
        else:
            headers = {}
        return (200, headers, '')

    if registry.startswith('http'):
        url = registry + path
    else:
        # In the insecure case, we should try the https URL, and when that produces
        # an error, fall back to http
        if insecure:
            https_url = 'https://' + registry + path
            responses.add(responses.GET, https_url, body=ConnectionError())
            url = 'http://' + registry + path
        else:
            url = 'https://' + registry + path

    responses.add_callback(responses.GET, url, callback=request_callback)

    expected_versions = versions
    if versions is None:
        # Test default versions value
        expected_versions = ('v1', 'v2')

    expected_result = dict(
        (version, '{}-digest'.format(version))
        for version in expected_versions)
    if versions and 'v2_list' in versions:
        # The list digest is only checked for truthiness, not equality
        expected_result['v2_list'] = True

    if expected_versions:
        actual_digests = get_manifest_digests(**kwargs)
        assert actual_digests.v1 == expected_result.get('v1')
        assert actual_digests.v2 == expected_result.get('v2')
        if 'v2_list' in expected_result:
            assert actual_digests.v2_list == expected_result.get('v2_list')
    elif require_digest:
        # No versions requested but a digest is required: must fail
        with pytest.raises(RuntimeError):
            get_manifest_digests(**kwargs)
    else:
        # No versions and no digest required: must not raise
        get_manifest_digests(**kwargs)
def get_output(workflow: DockerBuildWorkflow, buildroot_id: str, pullspec: ImageName,
               platform: str, source_build: bool = False):
    """
    Build the 'output' section of the metadata.
    :param buildroot_id: str, buildroot_id
    :param pullspec: ImageName
    :param platform: str, output platform
    :param source_build: bool, is source_build ?
    :param logs: list, of Output logs
    :return: tuple, list of Output instances, and extra Output file
    """
    def add_buildroot_id(output: Output) -> Output:
        # Tag the output with the buildroot that produced it
        output.metadata.update({'buildroot_id': buildroot_id})
        return output

    extra_output_file = None
    output_files: List[Output] = []
    image_id: str

    if source_build:
        manifest = workflow.data.koji_source_manifest
        image_id = manifest['config']['digest']
        # we are using digest from manifest, because we can't get diff_ids
        # unless we pull image, which would fail because there are so many layers
        layer_sizes = [{'digest': layer['digest'], 'size': layer['size']}
                       for layer in manifest['layers']]
        # source builds report the host architecture as the platform
        platform = os.uname()[4]
    else:
        imageutil = workflow.imageutil
        image_id = imageutil.get_inspect_for_image(pullspec, platform=platform)['Id']
        # from-scratch builds have no parent image to inspect
        parent_id = None
        if not workflow.data.dockerfile_images.base_from_scratch:
            parent_id = imageutil.base_image_inspect(platform)['Id']
        image_archive = str(
            workflow.build_dir.platform_dir(platform).exported_squashed_image)
        layer_sizes = imageutil.get_uncompressed_image_layer_sizes(image_archive)

    digests = get_manifest_digests(pullspec, workflow.conf.registry['uri'],
                                   workflow.conf.registry['insecure'],
                                   workflow.conf.registry.get('secret', None))
    # Prefer the v2 schema 2 digest; fall back to the OCI digest
    if digests.v2:
        config_manifest_digest = digests.v2
        config_manifest_type = 'v2'
    else:
        config_manifest_digest = digests.oci
        config_manifest_type = 'oci'
    config = get_config_from_registry(pullspec, workflow.conf.registry['uri'],
                                      config_manifest_digest,
                                      workflow.conf.registry['insecure'],
                                      workflow.conf.registry.get('secret', None),
                                      config_manifest_type)

    # We don't need container_config section
    if config and 'container_config' in config:
        del config['container_config']

    digest_pullspec = f"{pullspec.to_str(tag=False)}@{select_digest(digests)}"
    repositories = [pullspec.to_str(), digest_pullspec]

    # v1 digests are excluded from the typed digest map
    typed_digests = {
        get_manifest_media_type(version): digest
        for version, digest in digests.items()
        if version != "v1"
    }

    tag_conf = workflow.data.tag_conf
    if source_build:
        image_type = IMAGE_TYPE_DOCKER_ARCHIVE
        tags = sorted(set(image.tag for image in tag_conf.images))
    else:
        # Image type comes from the fetch-docker-archive plugin results
        image_metadatas = workflow.data.postbuild_results[FetchDockerArchivePlugin.key]
        image_type = image_metadatas[platform]["type"]
        tags = sorted(image.tag
                      for image in tag_conf.get_unique_images_with_platform(platform))

    metadata, output = get_image_output(image_type, image_id, platform, pullspec)

    metadata.update({
        'arch': platform,
        'type': 'docker-image',
        'components': [],
        'extra': {
            'image': {
                'arch': platform,
            },
            'docker': {
                'id': image_id,
                'repositories': repositories,
                'layer_sizes': layer_sizes,
                'tags': tags,
                'config': config,
                'digests': typed_digests,
            },
        },
    })

    if not config:
        del metadata['extra']['docker']['config']
    if not source_build:
        metadata['components'] = get_image_components(workflow, platform)
        if not workflow.data.dockerfile_images.base_from_scratch:
            metadata['extra']['docker']['parent_id'] = parent_id

    # Add the 'docker save' image to the output
    image = add_buildroot_id(output)

    # when doing regular build, worker already uploads image,
    # so orchestrator needs only metadata,
    # but source container build didn't upload that image yet,
    # so we want metadata, and the image to upload
    if source_build:
        output_files.append(metadata)
        extra_output_file = output
    else:
        output_files.append(image)

    if not source_build:
        # add operator manifests to output
        operator_manifests_path = (workflow.data.postbuild_results.get(
            PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY))
        if operator_manifests_path:
            manifests_metadata = get_output_metadata(operator_manifests_path,
                                                     OPERATOR_MANIFESTS_ARCHIVE)
            operator_manifests_output = Output(filename=operator_manifests_path,
                                               metadata=manifests_metadata)
            add_custom_type(operator_manifests_output, KOJI_BTYPE_OPERATOR_MANIFESTS)
            operator_manifests = add_buildroot_id(operator_manifests_output)
            output_files.append(operator_manifests)

    return output_files, extra_output_file
def run(self):
    """Tag the built image and push it to every configured registry.

    For source-container builds (``oci_image_path`` set) the image is pushed
    with skopeo and the v2 schema 2 manifest is stored on the workflow for
    Koji. Pushes are retried with exponential backoff until a V2 schema 2 or
    OCI digest is visible in the registry.

    Returns:
        list: the registry-qualified images that were pushed.

    Raises:
        RuntimeError: if a configured image name already carries a registry,
            or the v2 manifest for a source container cannot be fetched.
        ExceedsImageSizeError: if the binary image is larger than the limit
            configured in the reactor config.
    """
    pushed_images = []
    source_oci_image_path = self.workflow.build_result.oci_image_path
    if source_oci_image_path:
        source_unique_image = self.source_get_unique_image()

    # Ensure there is at least one unique image name to push.
    if not self.workflow.tag_conf.unique_images:
        if source_oci_image_path:
            self.workflow.tag_conf.add_unique_image(source_unique_image)
        else:
            self.workflow.tag_conf.add_unique_image(self.workflow.image)

    # First available V2/OCI digest; used to fetch the image config below.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None
    image_size_limit = get_image_size_limit(self.workflow)

    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry, insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)

        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError(
                    "Image name must not contain registry: %r" % image.registry)

            # Source containers have no layer sizes to check.
            if not source_oci_image_path:
                image_size = sum(item['size'] for item in self.workflow.layer_sizes)
                config_image_size = image_size_limit['binary_image']
                # Only handle the case when size is set > 0 in config
                if config_image_size and image_size > config_image_size:
                    # BUGFIX: report the configured limit value itself, not the
                    # whole image_size_limit dict, which is what the message
                    # text ("the limitation {}") refers to.
                    raise ExceedsImageSizeError(
                        'The size {} of image {} exceeds the limitation {} '
                        'configured in reactor config.'.format(
                            image_size, image, config_image_size))

            registry_image = image.copy()
            registry_image.registry = registry
            max_retries = DOCKER_PUSH_MAX_RETRIES

            for retry in range(max_retries + 1):
                if self.need_skopeo_push() or source_oci_image_path:
                    self.push_with_skopeo(registry_image, insecure,
                                          docker_push_secret,
                                          source_oci_image_path)
                else:
                    self.tasker.tag_and_push_image(
                        self.workflow.builder.image_id,
                        registry_image,
                        insecure=insecure,
                        force=True,
                        dockercfg=docker_push_secret)

                if source_oci_image_path:
                    # Koji needs the raw v2 schema 2 manifest of the
                    # source container.
                    manifests_dict = get_all_manifests(registry_image,
                                                       registry,
                                                       insecure,
                                                       docker_push_secret,
                                                       versions=('v2', ))
                    try:
                        koji_source_manifest_response = manifests_dict['v2']
                    except KeyError as exc:
                        raise RuntimeError(
                            f'Unable to fetch v2 schema 2 digest for {registry_image.to_str()}'
                        ) from exc
                    self.workflow.koji_source_manifest = koji_source_manifest_response.json()

                digests = get_manifest_digests(registry_image, registry,
                                               insecure, docker_push_secret)

                # Retry with exponential backoff while the registry has not
                # yet made a V2 schema 2 / OCI manifest visible.
                if (not (digests.v2 or digests.oci) and (retry < max_retries)):
                    sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2**retry)
                    self.log.info(
                        "Retrying push because V2 schema 2 or "
                        "OCI manifest not found in %is", sleep_time)
                    time.sleep(sleep_time)
                else:
                    if not self.need_skopeo_push():
                        # Local image no longer needed once pushed.
                        defer_removal(self.workflow, registry_image)
                    break

            pushed_images.append(registry_image)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests

            # Remember the first usable digest for the config fetch below.
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image

        if config_manifest_digest:
            push_conf_registry.config = get_config_from_registry(
                config_registry_image, registry, config_manifest_digest,
                insecure, docker_push_secret, config_manifest_type)
        else:
            self.log.info(
                "V2 schema 2 or OCI manifest is not available to get config from")

    self.log.info("All images were tagged and pushed")
    return pushed_images
def test_get_manifest_digests(tmpdir, caplog, image, registry, insecure,
                              creds, versions, require_digest, path):
    """Exercise get_manifest_digests against a mocked registry endpoint.

    Parametrized over credentials, http/https/insecure registries, and the
    set of requested manifest versions; asserts the returned digests match
    the per-version fake digests served by the callback and that no errors
    are logged.
    """
    kwargs = {}

    image = ImageName.parse(image)
    kwargs['image'] = image

    if creds:
        # Write a .dockercfg so the client sends an Authorization header.
        temp_dir = mkdtemp(dir=str(tmpdir))
        with open(os.path.join(temp_dir, '.dockercfg'), 'w+') as dockerconfig:
            dockerconfig.write(
                json.dumps(
                    {registry: {
                        'username': creds[0],
                        'password': creds[1]
                    }}))
        kwargs['dockercfg_path'] = temp_dir

    kwargs['registry'] = registry

    if insecure is not None:
        kwargs['insecure'] = insecure

    if versions is not None:
        kwargs['versions'] = versions

    kwargs['require_digest'] = require_digest

    def request_callback(request, all_headers=True):
        # Serve a fake digest keyed off the Accept header media type.
        if creds and creds[0] and creds[1]:
            assert request.headers['Authorization']
        media_type = request.headers['Accept']
        if media_type.endswith('list.v2+json'):
            digest = 'v2_list-digest'
        elif media_type.endswith('v2+json'):
            digest = 'v2-digest'
        elif media_type.endswith('v1+json'):
            digest = 'v1-digest'
        else:
            raise ValueError('Unexpected media type {}'.format(media_type))
        media_type_prefix = media_type.split('+')[0]
        if all_headers:
            headers = {
                'Content-Type': '{}+jsonish'.format(media_type_prefix),
            }
            # Manifest lists have no Docker-Content-Digest in this fake.
            if not media_type.endswith('list.v2+json'):
                headers['Docker-Content-Digest'] = digest
        else:
            headers = {}
        return (200, headers, '')

    if registry.startswith('http'):
        url = registry + path
    else:
        # In the insecure case, we should try the https URL, and when that produces
        # an error, fall back to http
        if insecure:
            https_url = 'https://' + registry + path
            responses.add(responses.GET, https_url, body=ConnectionError())
            url = 'http://' + registry + path
        else:
            url = 'https://' + registry + path
    responses.add_callback(responses.GET, url, callback=request_callback)

    expected_versions = versions
    if versions is None:
        # Test default versions value
        expected_versions = ('v1', 'v2')

    expected_result = dict((version, '{}-digest'.format(version))
                           for version in expected_versions)
    if versions and 'v2_list' in versions:
        # The fake returns no digest header for lists, so only truthiness
        # is checked for v2_list.
        expected_result['v2_list'] = True

    # Only capture errors, since we want to be sure none are reported
    # NOTE(review): caplog.atLevel / caplog.records() are the old
    # pytest-catchlog spellings (modern pytest uses at_level / the
    # records list attribute) — presumably the pinned pytest provides
    # them; confirm against the project's test requirements.
    with caplog.atLevel(logging.ERROR):
        if expected_versions:
            actual_digests = get_manifest_digests(**kwargs)
            # Check the expected versions are found
            assert actual_digests.v1 == expected_result.get('v1')
            assert actual_digests.v2 == expected_result.get('v2')
            if 'v2_list' in expected_result:
                assert actual_digests.v2_list == expected_result.get('v2_list')
        elif require_digest:
            # When require_digest is set but there is no digest
            # available (no expected_versions), expect a RuntimeError
            with pytest.raises(RuntimeError):
                get_manifest_digests(**kwargs)
        else:
            get_manifest_digests(**kwargs)

    # there should be no errors reported
    assert not caplog.records()
def run(self):
    """Tag the built image and push it to every configured registry.

    Skips digest-availability retries entirely when neither manifest
    grouping nor a V2 schema 2 manifest is expected. For source-container
    builds, also records the v2 schema 2 manifest on the workflow for Koji.

    Returns:
        list: the registry-qualified images that were pushed.

    Raises:
        RuntimeError: if a configured image name already carries a registry,
            or the v2 manifest for a source container cannot be fetched.
    """
    pushed_images = []
    source_oci_image_path = self.workflow.build_result.oci_image_path
    if source_oci_image_path:
        source_unique_image = self.source_get_unique_image()

    # Ensure there is at least one unique image name to push.
    if not self.workflow.tag_conf.unique_images:
        if source_oci_image_path:
            self.workflow.tag_conf.add_unique_image(source_unique_image)
        else:
            self.workflow.tag_conf.add_unique_image(self.workflow.image)

    # First available V2/OCI digest; used to fetch the image config below.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None

    # Whether any configured registry expects a V2 schema 2 manifest.
    # BUGFIX: the original computed this inside the per-image loop with
    # `for registry in self.registries:`, which clobbered the outer loop
    # variable `registry` — every later use in the iteration (manifest
    # fetch, digest fetch, config fetch) silently targeted the *last*
    # registry in the dict instead of the current one. Hoisted out of the
    # loops (it is loop-invariant) and computed without shadowing.
    expect_v2s2 = any(
        MEDIA_TYPE_DOCKER_V2_SCHEMA2 in conf.get('expected_media_types', [])
        for conf in self.registries.values())

    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry, insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)

        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError(
                    "Image name must not contain registry: %r" % image.registry)

            registry_image = image.copy()
            registry_image.registry = registry
            max_retries = DOCKER_PUSH_MAX_RETRIES
            # No point waiting for a V2/OCI digest nobody expects.
            if not (self.group or expect_v2s2):
                max_retries = 0

            for retry in range(max_retries + 1):
                if self.need_skopeo_push() or source_oci_image_path:
                    self.push_with_skopeo(registry_image, insecure,
                                          docker_push_secret,
                                          source_oci_image_path)
                else:
                    self.tasker.tag_and_push_image(
                        self.workflow.builder.image_id,
                        registry_image,
                        insecure=insecure,
                        force=True,
                        dockercfg=docker_push_secret)

                if source_oci_image_path:
                    # Koji needs the raw v2 schema 2 manifest of the
                    # source container.
                    manifests_dict = get_all_manifests(registry_image,
                                                       registry,
                                                       insecure,
                                                       docker_push_secret,
                                                       versions=('v2', ))
                    try:
                        koji_source_manifest_response = manifests_dict['v2']
                    except KeyError as exc:
                        raise RuntimeError(
                            'Unable to fetch v2 schema 2 digest for {}'.format(
                                registry_image.to_str())) from exc
                    self.workflow.koji_source_manifest = koji_source_manifest_response.json()

                digests = get_manifest_digests(registry_image, registry,
                                               insecure, docker_push_secret)

                # Retry with exponential backoff while the registry has not
                # yet made a V2 schema 2 / OCI manifest visible.
                if (not (digests.v2 or digests.oci) and (retry < max_retries)):
                    sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2**retry)
                    self.log.info(
                        "Retrying push because V2 schema 2 or "
                        "OCI manifest not found in %is", sleep_time)
                    time.sleep(sleep_time)
                else:
                    if not self.need_skopeo_push():
                        # Local image no longer needed once pushed.
                        defer_removal(self.workflow, registry_image)
                    break

            pushed_images.append(registry_image)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests

            # Remember the first usable digest for the config fetch below.
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image

        if config_manifest_digest:
            push_conf_registry.config = get_config_from_registry(
                config_registry_image, registry, config_manifest_digest,
                insecure, docker_push_secret, config_manifest_type)
        else:
            self.log.info(
                "V2 schema 2 or OCI manifest is not available to get config from")

    self.log.info("All images were tagged and pushed")
    return pushed_images
def run(self):
    """Verify the media types available in each registry match expectations.

    Collects, per registry, the set of expected media types (or only the
    manifest-list type when a list is the sole expectation) and the set
    actually found via get_manifest_digests; raises if any expected type
    is missing anywhere.

    Returns:
        list: the sorted union of media types found across registries (or
        just the manifest-list type when that was the only expectation).

    Raises:
        ValueError: if no unique image was set by earlier plugins.
        KeyError: if any registry is missing an expected media type.
    """
    # Only run if the build was successful
    if self.workflow.build_process_failed:
        self.log.info("Not running for failed build")
        return []

    # Work out the name of the image to pull
    if not self.workflow.tag_conf.unique_images:
        raise ValueError(
            "no unique image set, impossible to verify media types")
    image = self.workflow.tag_conf.unique_images[0]

    registries = deepcopy(get_registries(self.workflow, {}))
    media_in_registry = {}
    expect_list_only = self.get_manifest_list_only_expectation()

    for registry_name, registry in registries.items():
        expected_media_types = set(registry.get('expected_media_types', []))
        media_types = set()

        # When only a manifest list is expected, ignore any other
        # per-registry expectations.
        if expect_list_only:
            expected_media_types = set(
                [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST])

        media_in_registry[registry_name] = {
            'expected': expected_media_types
        }

        pullspec = image.copy()
        pullspec.registry = registry_name
        insecure = registry.get('insecure', False)
        secret = registry.get('secret', None)

        kwargs = {}
        if PLUGIN_FETCH_SOURCES_KEY in self.workflow.prebuild_results:
            # For source containers, limit the versions we ask
            # about (and, if necessary, the expected media types).
            # This can help to avoid issues with tooling that is
            # unable to deal with the number of layers in these
            # images.
            src_config = get_source_container(self.workflow, fallback={})
            limit_media_types = src_config.get('limit_media_types')
            if limit_media_types is not None:
                # Map media type strings back to get_manifest_digests'
                # short version names ('v1', 'v2', ...).
                short_name = {
                    v: k
                    for k, v in ManifestDigest.content_type.items()
                }
                versions = tuple(short_name[mt] for mt in limit_media_types)
                kwargs['versions'] = versions

                if expected_media_types:
                    expected_media_types.intersection_update(
                        set(limit_media_types))

        digests = get_manifest_digests(pullspec,
                                       registry_name,
                                       insecure,
                                       secret,
                                       require_digest=False,
                                       **kwargs)
        if digests:
            # Translate each digest found into its media type constant.
            if digests.v2_list:
                media_types.add(MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST)
            if digests.v2:
                media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA2)
            if digests.v1:
                media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA1)
            if digests.oci:
                media_types.add(MEDIA_TYPE_OCI_V1)
            if digests.oci_index:
                media_types.add(MEDIA_TYPE_OCI_V1_INDEX)

        media_in_registry[registry_name]['found'] = media_types

    should_raise = False
    all_found = set()
    for registry_name, manifests in media_in_registry.items():
        all_found.update(manifests['found'])
        # Any expected type not found in this registry is an error;
        # log them all before raising.
        if manifests['expected'] - manifests['found']:
            should_raise = True
            self.log.error(
                "expected media types %s not in available media types %s,"
                " for registry %s",
                sorted(manifests['expected'] - manifests['found']),
                sorted(manifests['found']), registry_name)

    if should_raise:
        raise KeyError("expected media types were not found")

    if expect_list_only:
        return [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
    return sorted(all_found)
def run(self) -> Dict[str, Union[List, Dict[str, List[str]]]]:
    """Push flatpak or source-container images to the configured registry.

    No-op for builds that are neither flatpak nor source-container builds.
    Pushes each image with skopeo, retrying with exponential backoff until
    a V2 schema 2 or OCI digest is visible; for source builds also records
    the v2 schema 2 manifest on the workflow data for Koji.

    Returns:
        dict: {'pushed_images': [...], 'repositories': ...}.

    Raises:
        RuntimeError: if the v2 manifest for a source container cannot be
            fetched after a push.
    """
    is_source_build = PLUGIN_FETCH_SOURCES_KEY in self.workflow.data.prebuild_results

    if not is_source_build and not is_flatpak_build(self.workflow):
        self.log.info('not a flatpak or source build, skipping plugin')
        return {'pushed_images': [],
                'repositories': self.get_repositories()}

    pushed_images = []
    wf_data = self.workflow.data

    tag_conf = wf_data.tag_conf

    # Pairs of (image metadata from the build plugin, target pullspec).
    images = []
    if is_source_build:
        source_image = self.source_get_unique_image()
        plugin_results = wf_data.buildstep_result[PLUGIN_SOURCE_CONTAINER_KEY]
        image = plugin_results['image_metadata']
        tag_conf.add_unique_image(source_image)
        images.append((image, source_image))
    else:
        # One image per platform for flatpak builds.
        for image_platform in get_platforms(self.workflow.data):
            plugin_results = wf_data.postbuild_results[PLUGIN_FLATPAK_CREATE_OCI]
            image = plugin_results[image_platform]['metadata']
            registry_image = tag_conf.get_unique_images_with_platform(image_platform)[0]
            images.append((image, registry_image))

    insecure = self.registry.get('insecure', False)

    docker_push_secret = self.registry.get('secret', None)
    self.log.info("Registry %s secret %s", self.registry['uri'], docker_push_secret)

    for image, registry_image in images:
        max_retries = DOCKER_PUSH_MAX_RETRIES

        for retry in range(max_retries + 1):
            self.push_with_skopeo(image, registry_image, insecure,
                                  docker_push_secret)

            if is_source_build:
                # Koji needs the raw v2 schema 2 manifest of the
                # source container.
                manifests_dict = get_all_manifests(registry_image,
                                                   self.registry['uri'],
                                                   insecure,
                                                   docker_push_secret,
                                                   versions=('v2',))
                try:
                    koji_source_manifest_response = manifests_dict['v2']
                except KeyError as exc:
                    raise RuntimeError(
                        f'Unable to fetch v2 schema 2 digest for {registry_image.to_str()}'
                    ) from exc

                wf_data.koji_source_manifest = koji_source_manifest_response.json()

            digests = get_manifest_digests(registry_image,
                                           self.registry['uri'],
                                           insecure, docker_push_secret)

            # Retry with exponential backoff while the registry has not
            # yet made a V2 schema 2 / OCI manifest visible.
            if not (digests.v2 or digests.oci) and (retry < max_retries):
                sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2 ** retry)
                self.log.info("Retrying push because V2 schema 2 or "
                              "OCI manifest not found in %is", sleep_time)

                time.sleep(sleep_time)
            else:
                break

        pushed_images.append(registry_image)

    self.log.info("All images were tagged and pushed")

    return {'pushed_images': pushed_images,
            'repositories': self.get_repositories()}
def test_get_manifest_digests_missing(tmpdir, has_content_type_header,
                                      has_content_digest, manifest_type,
                                      can_convert_v2_v1):
    """Check digest detection when the registry stores a single manifest type.

    Mocks requests.Session.get to emulate a registry that stores one of
    v1/v2/oci/oci_index and answers per-media-type Accept requests,
    optionally omitting the Content-Type and/or Docker-Content-Digest
    headers, then asserts exactly the matching ManifestDigest fields are
    set (a digest string when the header is present, True otherwise).
    """
    kwargs = {}

    image = ImageName.parse('example.com/spam:latest')
    kwargs['image'] = image

    kwargs['registry'] = 'https://example.com'

    expected_url = 'https://example.com/v2/spam/manifests/latest'

    mock_get_retry_session()

    def custom_get(url, headers, **kwargs):
        assert url == expected_url

        media_type = headers['Accept']
        media_type_prefix = media_type.split('+')[0]

        assert media_type.endswith('+json')

        # Attempt to simulate how a docker registry behaves:
        #  * If the stored digest is v1, return it
        #  * If the stored digest is v2, and v2 is requested, return it
        #  * If the stored digest is v2, and v1 is requested, try
        #    to convert and return v1 or an error.
        if manifest_type == 'v1':
            # v1 is always returned regardless of what was requested.
            digest = 'v1-digest'
            media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'v2':
            if media_type_prefix == 'application/vnd.docker.distribution.manifest.v2':
                digest = 'v2-digest'
            else:
                if not can_convert_v2_v1:
                    # Conversion failure: 400 MANIFEST_INVALID.
                    response_json = {"errors": [{"code": "MANIFEST_INVALID"}]}
                    response = requests.Response()
                    flexmock(response,
                             status_code=400,
                             content=json.dumps(response_json).encode("utf-8"),
                             headers=headers)
                    return response

                digest = 'v1-converted-digest'
                media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'oci':
            if media_type_prefix == 'application/vnd.oci.image.manifest.v1':
                digest = 'oci-digest'
            else:
                # Any non-OCI request: 404 MANIFEST_UNKNOWN.
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)
                return response
        elif manifest_type == 'oci_index':
            if media_type_prefix == 'application/vnd.oci.image.index.v1':
                digest = 'oci-index-digest'
            else:
                # Any non-index request: 404 MANIFEST_UNKNOWN.
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)
                return response

        # Successful response; header presence driven by the test params.
        headers = {}
        if has_content_type_header:
            headers['Content-Type'] = '{}+jsonish'.format(media_type_prefix)
        if has_content_digest:
            headers['Docker-Content-Digest'] = digest

        # v1 manifests have no mediaType field in the body.
        if media_type_prefix == 'application/vnd.docker.distribution.manifest.v1':
            response_json = {'schemaVersion': 1}
        else:
            response_json = {
                'schemaVersion': 2,
                'mediaType': media_type_prefix + '+json'
            }

        response = requests.Response()
        flexmock(response,
                 status_code=200,
                 content=json.dumps(response_json).encode("utf-8"),
                 headers=headers)

        return response

    (flexmock(requests.Session).should_receive('get').replace_with(custom_get))

    # NOTE(review): a sibling variant of this test guards the
    # (v1 and not has_content_type_header) case with
    # pytest.raises(RuntimeError); this version calls straight through —
    # presumably get_manifest_digests can identify v1 here without the
    # Content-Type header; confirm against its implementation.
    actual_digests = get_manifest_digests(**kwargs)
    if manifest_type == 'v1':
        if has_content_digest:
            assert actual_digests.v1 == 'v1-digest'
        else:
            # No Docker-Content-Digest header: presence recorded as True.
            assert actual_digests.v1 is True
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'v2':
        if can_convert_v2_v1:
            if has_content_digest:
                assert actual_digests.v1 == 'v1-converted-digest'
            else:
                assert actual_digests.v1 is True
        else:
            assert actual_digests.v1 is None
        if has_content_digest:
            assert actual_digests.v2 == 'v2-digest'
        else:
            assert actual_digests.v2 is True
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        if has_content_digest:
            assert actual_digests.oci == 'oci-digest'
        else:
            assert actual_digests.oci is True
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci_index':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        if has_content_digest:
            assert actual_digests.oci_index == 'oci-index-digest'
        else:
            assert actual_digests.oci_index is True
def test_get_manifest_digests_missing(tmpdir, has_content_type_header,
                                      has_content_digest, digest_is_v1,
                                      can_convert_v2_v1):
    """Check v1/v2 digest detection when the registry stores only one type.

    Mocks requests.Session.get to emulate a registry whose stored manifest
    is either v1 or v2 (with optional v2→v1 conversion), optionally
    omitting the Content-Type and/or Docker-Content-Digest headers, and
    asserts the resulting ManifestDigest fields. A v1 manifest with no
    Content-Type header is expected to raise RuntimeError since the body
    carries no mediaType to fall back on.
    """
    kwargs = {}

    image = ImageName.parse('example.com/spam:latest')
    kwargs['image'] = image

    kwargs['registry'] = 'https://example.com'

    expected_url = 'https://example.com/v2/spam/manifests/latest'

    mock_get_retry_session()

    def custom_get(url, headers, **kwargs):
        assert url == expected_url

        media_type = headers['Accept']
        media_type_prefix = media_type.split('+')[0]

        assert media_type.endswith('v2+json') or media_type.endswith('v1+json')

        # Attempt to simulate how a docker registry behaves:
        #  * If the stored digest is v1, return it
        #  * If the stored digest is v2, and v2 is requested, return it
        #  * If the stored digest is v2, and v1 is requested, try
        #    to convert and return v1 or an error.
        if digest_is_v1:
            # v1 is returned regardless of what was requested.
            digest = 'v1-digest'
            media_type_prefix = media_type_prefix.replace('v2', 'v1', 1)
        else:
            if media_type.endswith('v2+json'):
                digest = 'v2-digest'
            else:
                if not can_convert_v2_v1:
                    # Conversion failure: 400 MANIFEST_INVALID.
                    response_json = {"errors": [{"code": "MANIFEST_INVALID"}]}
                    response = requests.Response()
                    flexmock(response,
                             status_code=400,
                             content=json.dumps(response_json).encode("utf-8"),
                             headers=headers)
                    return response

                digest = 'v1-converted-digest'

        # Successful response; header presence driven by the test params.
        headers = {}
        if has_content_type_header:
            headers['Content-Type'] = '{}+jsonish'.format(media_type_prefix)
        if has_content_digest:
            headers['Docker-Content-Digest'] = digest

        if media_type_prefix.endswith('v2'):
            response_json = {
                'schemaVersion': 2,
                'mediaType':
                'application/vnd.docker.distribution.manifest.v2+json'
            }
        else:
            # v1 manifests have no mediaType field in the body.
            response_json = {'schemaVersion': 1}

        response = requests.Response()
        flexmock(response,
                 status_code=200,
                 content=json.dumps(response_json).encode("utf-8"),
                 headers=headers)

        return response

    (flexmock(requests.Session).should_receive('get').replace_with(custom_get))

    if digest_is_v1 and not has_content_type_header:
        # v1 manifests don't have a mediaType field, so we can't fall back
        # to looking at the returned manifest to detect the type.
        with pytest.raises(RuntimeError):
            get_manifest_digests(**kwargs)
        return
    else:
        actual_digests = get_manifest_digests(**kwargs)

    if digest_is_v1:
        if has_content_digest:
            assert actual_digests.v1 == 'v1-digest'
        else:
            # No Docker-Content-Digest header: presence recorded as True.
            assert actual_digests.v1 is True
        assert actual_digests.v2 is None
    else:
        if can_convert_v2_v1:
            if has_content_type_header:
                if has_content_digest:
                    assert actual_digests.v1 == 'v1-converted-digest'
                else:
                    assert actual_digests.v1 is True
            else:
                # don't even know the response is v1 without Content-Type
                assert actual_digests.v1 is None
        else:
            assert actual_digests.v1 is None
        if has_content_digest:
            assert actual_digests.v2 == 'v2-digest'
        else:
            assert actual_digests.v2 is True
def run(self):
    """Verify each registry exposes the media types it is configured to expect.

    Skips registries with no 'expected_media_types', and skips entirely
    when pulp registries are configured. Queries manifest digests (and,
    for the V1 type, performs an image pull check) and raises if any
    expected type is missing.

    Returns:
        list: the sorted media types found, or None when pulp registries
        are configured.

    Raises:
        ValueError: if no unique image was set by earlier plugins.
        KeyError: if an expected media type is not available.
    """
    # Only run if the build was successful
    if self.workflow.build_process_failed:
        self.log.info("Not running for failed build")
        return []

    # Work out the name of the image to pull
    if not self.workflow.tag_conf.unique_images:
        raise ValueError("no unique image set, impossible to verify media types")

    # Pulp manages its own media types; nothing to verify here.
    if self.workflow.push_conf.pulp_registries:
        self.log.info("pulp registry configure, verify_media_types should not run")
        return

    image = self.workflow.tag_conf.unique_images[0]

    media_types = set()

    registries = deepcopy(get_registries(self.workflow, {}))
    for registry_name, registry in registries.items():
        initial_media_types = registry.get('expected_media_types', [])
        if not initial_media_types:
            continue

        expected_media_types = self.set_manifest_list_expectations(initial_media_types)

        pullspec = image.copy()
        pullspec.registry = registry_name
        insecure = registry.get('insecure', False)
        secret = registry.get('secret', None)

        # Digest queries only needed when a digest-based type is expected.
        check_digests = (MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in expected_media_types or
                         MEDIA_TYPE_DOCKER_V2_SCHEMA2 in expected_media_types or
                         MEDIA_TYPE_DOCKER_V2_SCHEMA1 in expected_media_types)
        if check_digests:
            digests = get_manifest_digests(pullspec, registry_name, insecure, secret,
                                           require_digest=False)
            if digests:
                # Record each found type only if it was actually expected.
                if digests.v2_list:
                    self.log.info("Manifest list found")
                    if MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in expected_media_types:
                        media_types.add(MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST)
                if digests.v2:
                    self.log.info("V2 schema 2 digest found")
                    if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in expected_media_types:
                        media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA2)
                if digests.v1:
                    self.log.info("V2 schema 1 digest found")
                    if MEDIA_TYPE_DOCKER_V2_SCHEMA1 in expected_media_types:
                        media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA1)
        if MEDIA_TYPE_DOCKER_V1 in expected_media_types:
            # V1 has no digest endpoint; verify by pulling the image.
            if verify_v1_image(pullspec, registry_name, self.log, insecure, secret):
                media_types.add(MEDIA_TYPE_DOCKER_V1)

        # sorting the media type here so the failure message is predictable for unit tests
        missing_types = []
        for media_type in sorted(expected_media_types):
            if media_type not in media_types:
                missing_types.append(media_type)
        if missing_types:
            raise KeyError("expected media types {0} ".format(missing_types) +
                           "not in available media types {0}".format(sorted(media_types)))

    return sorted(media_types)
def test_get_manifest_digests_missing(tmpdir, has_content_type_header,
                                      has_content_digest, manifest_type,
                                      can_convert_v2_v1):
    """Check digest detection when the registry stores a single manifest type.

    Mocks requests.Session.get to emulate a registry that stores one of
    v1/v2/oci/oci_index, optionally omitting the Content-Type and/or
    Docker-Content-Digest headers, and asserts exactly the matching
    ManifestDigest fields are set. A v1 manifest without a Content-Type
    header is expected to raise RuntimeError since the body carries no
    mediaType to fall back on.
    """
    kwargs = {}

    image = ImageName.parse('example.com/spam:latest')
    kwargs['image'] = image

    kwargs['registry'] = 'https://example.com'

    expected_url = 'https://example.com/v2/spam/manifests/latest'

    mock_get_retry_session()

    def custom_get(url, headers, **kwargs):
        assert url == expected_url

        media_type = headers['Accept']
        media_type_prefix = media_type.split('+')[0]

        assert media_type.endswith('+json')

        # Attempt to simulate how a docker registry behaves:
        #  * If the stored digest is v1, return it
        #  * If the stored digest is v2, and v2 is requested, return it
        #  * If the stored digest is v2, and v1 is requested, try
        #    to convert and return v1 or an error.
        if manifest_type == 'v1':
            # v1 is always returned regardless of what was requested.
            digest = 'v1-digest'
            media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'v2':
            if media_type_prefix == 'application/vnd.docker.distribution.manifest.v2':
                digest = 'v2-digest'
            else:
                if not can_convert_v2_v1:
                    # Conversion failure: 400 MANIFEST_INVALID.
                    response_json = {"errors": [{"code": "MANIFEST_INVALID"}]}
                    response = requests.Response()
                    flexmock(response,
                             status_code=400,
                             content=json.dumps(response_json).encode("utf-8"),
                             headers=headers)
                    return response

                digest = 'v1-converted-digest'
                media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'oci':
            if media_type_prefix == 'application/vnd.oci.image.manifest.v1':
                digest = 'oci-digest'
            else:
                # Any non-OCI request: 404 MANIFEST_UNKNOWN.
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)
                return response
        elif manifest_type == 'oci_index':
            if media_type_prefix == 'application/vnd.oci.image.index.v1':
                digest = 'oci-index-digest'
            else:
                # Any non-index request: 404 MANIFEST_UNKNOWN.
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)
                return response

        # Successful response; header presence driven by the test params.
        headers = {}
        if has_content_type_header:
            headers['Content-Type'] = '{}+jsonish'.format(media_type_prefix)
        if has_content_digest:
            headers['Docker-Content-Digest'] = digest

        # v1 manifests have no mediaType field in the body.
        if media_type_prefix == 'application/vnd.docker.distribution.manifest.v1':
            response_json = {'schemaVersion': 1}
        else:
            response_json = {'schemaVersion': 2,
                             'mediaType': media_type_prefix + '+json'}

        response = requests.Response()
        flexmock(response,
                 status_code=200,
                 content=json.dumps(response_json).encode("utf-8"),
                 headers=headers)

        return response

    (flexmock(requests.Session)
        .should_receive('get')
        .replace_with(custom_get))

    if manifest_type == 'v1' and not has_content_type_header:
        # v1 manifests don't have a mediaType field, so we can't fall back
        # to looking at the returned manifest to detect the type.
        with pytest.raises(RuntimeError):
            get_manifest_digests(**kwargs)
        return
    else:
        actual_digests = get_manifest_digests(**kwargs)

    if manifest_type == 'v1':
        if has_content_digest:
            assert actual_digests.v1 == 'v1-digest'
        else:
            # No Docker-Content-Digest header: presence recorded as True.
            assert actual_digests.v1 is True
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'v2':
        if can_convert_v2_v1:
            if has_content_type_header:
                if has_content_digest:
                    assert actual_digests.v1 == 'v1-converted-digest'
                else:
                    assert actual_digests.v1 is True
            else:
                # don't even know the response is v1 without Content-Type
                assert actual_digests.v1 is None
        else:
            assert actual_digests.v1 is None
        if has_content_digest:
            assert actual_digests.v2 == 'v2-digest'
        else:
            assert actual_digests.v2 is True
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        if has_content_digest:
            assert actual_digests.oci == 'oci-digest'
        else:
            assert actual_digests.oci is True
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci_index':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        if has_content_digest:
            assert actual_digests.oci_index == 'oci-index-digest'
        else:
            assert actual_digests.oci_index is True