def test_manifestbackfillworker_mislinked_manifest(clear_rows, initialized_db):
    """
    Tests that a manifest whose image is mislinked will have its storages relinked properly.
    """
    # Clear out the pre-existing tag manifest rows so the tags can be reused.
    TagManifestLabel.delete().execute()
    TagManifest.delete().execute()

    repo = model.repository.get_repository("devtable", "complex")
    v30_tag = model.tag.get_active_tag("devtable", "gargantuan", "v3.0")
    v50_tag = model.tag.get_active_tag("devtable", "gargantuan", "v5.0")

    # Construct a mislinked manifest: its layer references the v3.0 blob while the tag
    # (and thus the image) is v5.0.
    schema1_builder = DockerSchema1ManifestBuilder("devtable", "gargantuan", "sometag")
    schema1_builder.add_layer(v30_tag.image.storage.content_checksum, '{"id": "foo"}')
    built = schema1_builder.build(docker_v2_signing_key)

    mislinked_manifest = TagManifest.create(
        json_data=built.bytes.as_encoded_str(), digest=built.digest, tag=v50_tag
    )

    # Backfill the manifest and verify that the proper content checksum got linked.
    assert _backfill_manifest(mislinked_manifest)

    map_row = TagManifestToManifest.get(tag_manifest=mislinked_manifest)
    assert not map_row.broken

    backfilled = map_row.manifest
    assert ManifestLegacyImage.get(manifest=backfilled).image == v50_tag.image

    blob_rows = list(ManifestBlob.select().where(ManifestBlob.manifest == backfilled))
    assert len(blob_rows) == 1
    assert blob_rows[0].blob.content_checksum == v30_tag.image.storage.content_checksum
def test_create_manifest_with_temp_tag(initialized_db, registry_model):
    schema1_builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest")
    layer_metadata = {
        "id": "someid",
        "author": "some user",
    }
    schema1_builder.add_layer("sha256:abcde", json.dumps(layer_metadata, ensure_ascii=False))
    built = schema1_builder.build(ensure_ascii=False)

    for digest in built.local_blob_digests:
        _populate_blob(digest)

    # Write the manifest into the database under a temporary tag.
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    created = registry_model.create_manifest_with_temp_tag(repository_ref, built, 300, storage)
    assert created.digest == built.digest

    # A temp-tagged manifest must not be visible via a normal lookup...
    assert registry_model.lookup_manifest_by_digest(repository_ref, built.digest) is None

    # ...but must be visible when dead tags are allowed.
    assert (
        registry_model.lookup_manifest_by_digest(repository_ref, built.digest, allow_dead=True)
        is not None
    )
def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
    # Manifests referencing remote layers cannot be represented in schema 1.
    if self.has_remote_layer:
        return None

    builder = DockerSchema1ManifestBuilder(namespace_name, repo_name, tag_name)
    self._populate_schema1_builder(builder, content_retriever)
    return builder.build()
def test_manifestbackfillworker_mislinked_invalid_manifest(clear_rows, initialized_db):
    """
    Tests that a manifest whose image is mislinked will attempt to have its storages relinked
    properly.
    """
    # Clear out the pre-existing tag manifest rows so the tag can be reused.
    TagManifestLabel.delete().execute()
    TagManifest.delete().execute()

    repo = model.repository.get_repository("devtable", "complex")
    v50_tag = model.tag.get_active_tag("devtable", "gargantuan", "v5.0")

    # Construct a mislinked manifest: the layer references a nonexistent blob while the
    # tag's image is v5.0.
    schema1_builder = DockerSchema1ManifestBuilder("devtable", "gargantuan", "sometag")
    schema1_builder.add_layer("sha256:deadbeef", '{"id": "foo"}')
    built = schema1_builder.build(docker_v2_signing_key)

    broken_manifest = TagManifest.create(
        json_data=built.bytes.as_encoded_str(), digest=built.digest, tag=v50_tag
    )

    # Backfill the manifest and verify it was flagged as broken.
    assert _backfill_manifest(broken_manifest)

    map_row = TagManifestToManifest.get(tag_manifest=broken_manifest)
    assert map_row.broken

    backfilled = map_row.manifest
    assert ManifestLegacyImage.get(manifest=backfilled).image == v50_tag.image

    # No blobs should have been linked, since the referenced blob is invalid.
    assert len(list(ManifestBlob.select().where(ManifestBlob.manifest == backfilled))) == 0
def test_store_tag_manifest(get_storages, initialized_db):
    # Build a manifest whose layers reference each of the provided storages.
    builder = DockerSchema1ManifestBuilder("devtable", "simple", "sometag")

    storages = get_storages()
    assert storages

    repo = model.repository.get_repository("devtable", "simple")
    storage_id_map = {}
    for index, storage_row in enumerate(storages):
        image_id = "someimage%s" % index
        builder.add_layer(storage_row.content_checksum, json.dumps({"id": image_id}))
        find_create_or_link_image(image_id, repo, "devtable", {}, "local_us")
        storage_id_map[storage_row.content_checksum] = storage_row.id

    manifest = builder.build(docker_v2_signing_key)
    tag_manifest, _ = store_tag_manifest_for_testing(
        "devtable", "simple", "sometag", manifest, manifest.leaf_layer_v1_image_id, storage_id_map
    )

    # Verify the rows expected by the new data model.
    mapping_row = TagManifestToManifest.get(tag_manifest=tag_manifest)
    assert mapping_row.manifest is not None
    assert mapping_row.manifest.manifest_bytes == manifest.bytes.as_encoded_str()
    assert mapping_row.manifest.digest == str(manifest.digest)

    blob_ids = {
        row.blob_id
        for row in ManifestBlob.select().where(ManifestBlob.manifest == mapping_row.manifest)
    }
    assert blob_ids == {s.id for s in storages}

    assert ManifestLegacyImage.get(manifest=mapping_row.manifest).image == tag_manifest.tag.image
def test_create_manifest_and_retarget_tag_with_labels(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    latest_tag = registry_model.get_repo_tag(repository_ref, "latest", include_legacy_image=True)
    parsed = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()

    # v1 metadata carrying an expiration label.
    metadata = {
        "id": latest_tag.legacy_image.docker_image_id,
        "config": {
            "Labels": {
                "quay.expires-after": "2w",
            },
        },
    }

    builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
    builder.add_layer(parsed.blob_digests[0], json.dumps(metadata))
    sample_manifest = builder.build(docker_v2_signing_key)
    assert sample_manifest is not None

    new_manifest, new_tag = registry_model.create_manifest_and_retarget_tag(
        repository_ref, sample_manifest, "anothertag", storage
    )
    assert new_manifest is not None
    assert new_tag is not None
    assert new_tag.name == "anothertag"
    assert new_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict

    # Ensure the labels were applied.
    assert new_tag.lifetime_end_ms is not None
def test_create_manifest_and_retarget_tag_with_labels(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    latest_tag = registry_model.get_repo_tag(repository_ref, "latest", include_legacy_image=True)
    parsed_manifest = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()

    # v1 metadata that carries an expiration label.
    layer_metadata = {
        "id": latest_tag.legacy_image.docker_image_id,
        "config": {
            "Labels": {
                "quay.expires-after": "2w",
            },
        },
    }

    schema1_builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
    schema1_builder.add_layer(parsed_manifest.blob_digests[0], json.dumps(layer_metadata))
    sample_manifest = schema1_builder.build(docker_v2_signing_key)
    assert sample_manifest is not None

    created_manifest, created_tag = registry_model.create_manifest_and_retarget_tag(
        repository_ref, sample_manifest, "anothertag", storage
    )
    assert created_manifest is not None
    assert created_tag is not None
    assert created_tag.name == "anothertag"
    assert created_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict

    # The expiration label should have given the tag an end date.
    assert created_tag.lifetime_end_ms is not None
def _build_manifest_for_legacy_image(self, tag_name, legacy_image_row):
    """
    Builds and returns a signed schema 1 manifest synthesized from the given legacy image
    row and its parent chain, or None if one could not be built (missing parents, uploading
    storage rows, or a manifest that remains too large even after metadata stripping).
    """
    import features

    from app import app, docker_v2_signing_key

    repo = legacy_image_row.repository
    namespace_name = repo.namespace_user.username
    repo_name = repo.name

    # Find the v1 metadata for this image and its parents.
    try:
        parents = model.image.get_parent_images(namespace_name, repo_name, legacy_image_row)
    except model.DataModelException:
        logger.exception(
            "Could not load parent images for legacy image %s", legacy_image_row.id
        )
        return None

    # If the manifest is being generated under the library namespace, then we make its namespace
    # empty.
    manifest_namespace = namespace_name
    if features.LIBRARY_SUPPORT and namespace_name == app.config["LIBRARY_NAMESPACE"]:
        manifest_namespace = ""

    # Create and populate the manifest builder
    builder = DockerSchema1ManifestBuilder(manifest_namespace, repo_name, tag_name)

    # Add the leaf layer
    builder.add_layer(
        legacy_image_row.storage.content_checksum, legacy_image_row.v1_json_metadata
    )
    if legacy_image_row.storage.uploading:
        logger.error("Cannot add an uploading storage row: %s", legacy_image_row.storage.id)
        return None

    for parent_image in parents:
        if parent_image.storage.uploading:
            # BUGFIX: previously logged the leaf image's storage id here instead of the
            # offending parent's, making the error message misleading.
            logger.error("Cannot add an uploading storage row: %s", parent_image.storage.id)
            return None

        builder.add_layer(parent_image.storage.content_checksum, parent_image.v1_json_metadata)

    try:
        built_manifest = builder.build(docker_v2_signing_key)

        # If the generated manifest is greater than the maximum size, regenerate it with
        # intermediate metadata layers stripped down to their bare essentials.
        if len(built_manifest.bytes.as_encoded_str()) > MAXIMUM_GENERATED_MANIFEST_SIZE:
            built_manifest = builder.with_metadata_removed().build(docker_v2_signing_key)

        if len(built_manifest.bytes.as_encoded_str()) > MAXIMUM_GENERATED_MANIFEST_SIZE:
            logger.error("Legacy image is too large to generate manifest")
            return None

        return built_manifest
    except ManifestException:
        logger.exception(
            "Got exception when trying to build manifest for legacy image %s", legacy_image_row
        )
        return None
def build_schema1(self, namespace, repo_name, tag_name, images, blobs, options, arch="amd64"):
    """
    Builds and returns a signed schema 1 manifest for the given images, registering each
    image's bytes in `blobs` keyed by digest. Honors `options.manifest_invalid_blob_references`
    (substitute a bogus digest) and `options.ensure_ascii` (JSON/signing encoding).
    """
    builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name, arch)

    # Schema 1 layers are ordered leaf-first, so walk the images in reverse.
    for image in reversed(images):
        assert image.urls is None

        checksum = "sha256:" + hashlib.sha256(image.bytes).hexdigest()
        blobs[checksum] = image.bytes

        # If invalid blob references were requested, just make it up.
        if options.manifest_invalid_blob_references:
            # BUGFIX: hashlib.sha256 requires a bytes-like object under Python 3;
            # passing a str here raised TypeError.
            checksum = "sha256:" + hashlib.sha256(b"notarealthing").hexdigest()

        layer_dict = {"id": image.id, "parent": image.parent_id}
        if image.config is not None:
            layer_dict["config"] = image.config

        if image.size is not None:
            layer_dict["Size"] = image.size

        if image.created is not None:
            layer_dict["created"] = image.created

        builder.add_layer(checksum, json.dumps(layer_dict, ensure_ascii=options.ensure_ascii))

    # Build the manifest.
    built = builder.build(self.jwk, ensure_ascii=options.ensure_ascii)

    # Validate it before we send it.
    DockerSchema1Manifest(built.bytes)
    return built
def test_unicode_emoji(registry_model):
    builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest")
    builder.add_layer(
        "sha256:abcde",
        json.dumps({"id": "someid", "author": "😱"}, ensure_ascii=False),
    )

    manifest = builder.build(ensure_ascii=False)
    manifest._validate()

    for digest in manifest.local_blob_digests:
        _populate_blob(digest)

    # Write the manifest into the database.
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    created_manifest, _ = registry_model.create_manifest_and_retarget_tag(
        repository_ref, manifest, "latest", storage
    )
    assert created_manifest
    assert created_manifest.digest == manifest.digest
    assert (
        created_manifest.internal_manifest_bytes.as_encoded_str()
        == manifest.bytes.as_encoded_str()
    )

    # Look it up again and validate.
    found = registry_model.lookup_manifest_by_digest(
        repository_ref, manifest.digest, allow_dead=True
    )
    assert found
    assert found.digest == manifest.digest
    assert found.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
    assert found.get_parsed_manifest().digest == manifest.digest
def test_manifestbackfillworker_repeat_digest(clear_rows, initialized_db):
    """
    Tests that a manifest with a shared digest will be properly linked.
    """
    # Clear out the pre-existing tag manifest rows so the tags can be reused.
    TagManifestLabel.delete().execute()
    TagManifest.delete().execute()

    repo = model.repository.get_repository("devtable", "gargantuan")
    v30_tag = model.tag.get_active_tag("devtable", "gargantuan", "v3.0")
    v50_tag = model.tag.get_active_tag("devtable", "gargantuan", "v5.0")

    # Build one manifest and attach it to both tags (legal under the old data model).
    builder = DockerSchema1ManifestBuilder("devtable", "gargantuan", "sometag")
    builder.add_layer("sha256:deadbeef", '{"id": "foo"}')
    built = builder.build(docker_v2_signing_key)

    first = TagManifest.create(
        json_data=built.bytes.as_encoded_str(), digest=built.digest, tag=v30_tag
    )
    second = TagManifest.create(
        json_data=built.bytes.as_encoded_str(), digest=built.digest, tag=v50_tag
    )

    # Backfill "both" manifests and verify they end up pointing at a single row.
    assert _backfill_manifest(first)
    assert _backfill_manifest(second)

    assert (
        TagManifestToManifest.get(tag_manifest=first).manifest
        == TagManifestToManifest.get(tag_manifest=second).manifest
    )
def move_tag(repository, tag, image_ids, expect_gc=True):
    namespace = repository.namespace_user.username
    name = repository.name

    repo_ref = RepositoryReference.for_repo_obj(repository)
    builder = DockerSchema1ManifestBuilder(namespace, name, tag)

    # NOTE: Building root to leaf.
    previous_image_id = None
    for image_id in image_ids:
        v1_metadata = {"id": image_id, "config": {"Labels": {"foo": "bar", "meh": "grah",}}}
        if previous_image_id:
            v1_metadata["parent"] = previous_image_id

        # Create a storage row for the layer blob.
        _, blob_digest = _populate_blob(repository, image_id.encode("ascii"))

        builder.insert_layer(blob_digest, json.dumps(v1_metadata))
        previous_image_id = image_id

    # Sign the manifest and retarget the tag at it.
    built = builder.build(docker_v2_signing_key)
    registry_model.create_manifest_and_retarget_tag(
        repo_ref, built, tag, storage, raise_on_error=True
    )

    if expect_gc:
        assert model.gc.garbage_collect_repo(repository) == expect_gc
def test_build_unencoded_unicode_manifest(with_key):
    builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")

    # Layer metadata containing non-ASCII text, serialized without escaping.
    layer_json = json.dumps({"id": "someid", "author": "Sômé guy",}, ensure_ascii=False)
    builder.add_layer("sha256:abcde", layer_json)

    built = builder.build(with_key, ensure_ascii=False)
    built._validate()
def test_build_with_metadata_removed():
    builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
    builder.add_layer(
        "sha256:abcde",
        json.dumps(
            {
                "id": "someid",
                "parent": "someid",
                "author": "😱",
                "comment": "hello world!",
                "created": "1975-01-02 12:34",
                "Size": 5678,
                "container_config": {"Cmd": "foobar", "more": "stuff", "goes": "here",},
            }
        ),
    )
    builder.add_layer(
        "sha256:abcde",
        json.dumps(
            {
                "id": "anotherid",
                "author": "😱",
                "created": "1985-02-03 12:34",
                "Size": 1234,
                "container_config": {"Cmd": "barbaz", "more": "stuff", "goes": "here",},
            }
        ),
    )

    built = builder.build(None)
    built._validate()
    assert built.leaf_layer_v1_image_id == "someid"

    stripped = builder.with_metadata_removed().build()
    stripped._validate()

    original_layers = list(built.get_layers(None))
    stripped_layers = list(stripped.get_layers(None))
    assert len(original_layers) == len(stripped_layers)

    # Everything except the extraneous metadata must survive the strip untouched.
    for original_layer, stripped_layer in zip(original_layers, stripped_layers):
        assert original_layer.layer_id == stripped_layer.layer_id
        assert original_layer.compressed_size == stripped_layer.compressed_size
        assert original_layer.command == stripped_layer.command
        assert original_layer.comment == stripped_layer.comment
        assert original_layer.author == stripped_layer.author
        assert original_layer.blob_digest == stripped_layer.blob_digest
        assert original_layer.created_datetime == stripped_layer.created_datetime

    assert built.leaf_layer_v1_image_id == stripped.leaf_layer_v1_image_id
    assert original_layers[-1].layer_id == built.leaf_layer_v1_image_id

    assert json.loads(original_layers[-1].internal_layer.raw_v1_metadata) == json.loads(
        stripped_layers[-1].internal_layer.raw_v1_metadata
    )
def test_image_with_cas(default_tag_policy, initialized_db):
    """
    A repository with a tag pointing to an image backed by CAS.

    Deleting and GCing the tag should result in the storage and its CAS data being removed.
    """
    with assert_gc_integrity(expect_storage_removed=True):
        repository = create_repository()

        # Create an image storage record under CAS.
        content = b"hello world"
        digest = "sha256:" + hashlib.sha256(content).hexdigest()
        preferred = storage.preferred_locations[0]
        storage.put_content({preferred}, storage.blob_path(digest), content)

        image_storage = database.ImageStorage.create(content_checksum=digest)
        location = database.ImageStorageLocation.get(name=preferred)
        database.ImageStoragePlacement.create(location=location, storage=image_storage)

        # Temp link so its available.
        model.blob.store_blob_record_and_temp_link_in_repo(
            repository, digest, location, len(content), 120
        )

        # Ensure the CAS path exists.
        assert storage.exists({preferred}, storage.blob_path(digest))

        # Build and store a manifest pointing at that blob.
        builder = DockerSchema1ManifestBuilder(
            repository.namespace_user.username, repository.name, "first"
        )
        builder.insert_layer(digest, json.dumps({"id": "i1",}))
        built = builder.build(docker_v2_signing_key)

        repo_ref = RepositoryReference.for_repo_obj(repository)
        registry_model.create_manifest_and_retarget_tag(
            repo_ref, built, "first", storage, raise_on_error=True
        )

        # Delete the temp reference.
        _delete_temp_links(repository)

        # Delete the tag.
        delete_tag(repository, "first")
        assert_deleted(repository, "i1")

        # Ensure the CAS path is gone.
        assert not storage.exists({preferred}, storage.blob_path(digest))
def generate_legacy_layers(self, images_map, content_retriever):
    assert not self.has_remote_layer

    # NOTE: We use the DockerSchema1ManifestBuilder here because it already contains
    # the logic for generating the DockerV1Metadata. All of this will go away once we get
    # rid of legacy images in the database, so this is a temporary solution.
    builder = DockerSchema1ManifestBuilder("", "", "")
    self._populate_schema1_builder(builder, content_retriever)
    return builder.build().generate_legacy_layers(images_map, content_retriever)
def test_validate_manifest_with_none_metadata_layer(with_key):
    builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
    builder.add_layer("sha256:abcde", None)

    built = builder.build(with_key, ensure_ascii=False)
    built._validate()

    # Round-trip the signed bytes to make sure the manifest can be reloaded.
    DockerSchema1Manifest(Bytes.for_string_or_unicode(built.bytes.as_encoded_str()))
def test_validate_manifest_with_emoji(with_key):
    builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")

    # Layer metadata containing an emoji, serialized without ASCII escaping.
    builder.add_layer(
        "sha256:abcde", json.dumps({"id": "someid", "author": "😱",}, ensure_ascii=False)
    )

    built = builder.build(with_key, ensure_ascii=False)
    built._validate()

    # Round-trip the signed bytes to make sure the manifest can be reloaded.
    DockerSchema1Manifest(Bytes.for_string_or_unicode(built.bytes.as_encoded_str()))
def commit_tag_and_manifest(self, tag_name, layer): """ Commits a new tag + manifest for that tag to the repository with the given name, pointing to the given layer. """ # Lookup the top layer. image_metadata = self._builder_state.image_metadata.get(layer.layer_id) if image_metadata is None: return None # For each layer/image, add it to the manifest builder. builder = DockerSchema1ManifestBuilder( self._repository_ref.namespace_name, self._repository_ref.name, tag_name) current_layer_id = layer.layer_id while True: v1_metadata_string = self._builder_state.image_metadata.get( current_layer_id) if v1_metadata_string is None: logger.warning("Missing metadata for layer %s", current_layer_id) return None v1_metadata = json.loads(v1_metadata_string) parent_id = v1_metadata.get("parent", None) if parent_id is not None and parent_id not in self._builder_state.image_metadata: logger.warning("Missing parent for layer %s", current_layer_id) return None blob_digest = self._builder_state.image_blobs.get(current_layer_id) if blob_digest is None: logger.warning("Missing blob for layer %s", current_layer_id) return None builder.add_layer(blob_digest, v1_metadata_string) if not parent_id: break current_layer_id = parent_id # Build the manifest. manifest_instance = builder.build(self._legacy_signing_key) # Target the tag at the manifest. manifest, tag = registry_model.create_manifest_and_retarget_tag( self._repository_ref, manifest_instance, tag_name, self._storage) if tag is None: return None self._builder_state.tags[tag_name] = tag._db_id self._save_to_session() return tag
def test_build_unencoded_unicode_manifest(with_key):
    builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
    metadata = {
        "id": "someid",
        "author": "Sômé guy",
    }
    builder.add_layer("sha256:abcde", json.dumps(metadata, ensure_ascii=False))

    signed = builder.build(with_key, ensure_ascii=False)
    signed._validate()
def test_build_schema1():
    manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
    assert not manifest.has_remote_layer

    retriever = ContentRetrieverForTesting(
        {
            "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7": CONFIG_BYTES,
        }
    )

    # Downconvert the schema 2 manifest and verify the resulting media type.
    builder = DockerSchema1ManifestBuilder("somenamespace", "somename", "sometag")
    manifest._populate_schema1_builder(builder, retriever)
    schema1 = builder.build(docker_v2_signing_key)

    assert schema1.media_type == DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
def test_invalid_manifestlist():
    # Build a manifest list with a schema 1 manifest of the wrong architecture.
    schema1_builder = DockerSchema1ManifestBuilder("foo", "bar", "baz")
    schema1_builder.add_layer("sha:2356", '{"id": "foo"}')
    schema1 = schema1_builder.build().unsigned()

    list_builder = DockerSchema2ManifestListBuilder()
    list_builder.add_manifest(schema1, "amd32", "linux")
    manifest_list = list_builder.build()

    retriever = ContentRetrieverForTesting()
    retriever.add_digest(schema1.digest, schema1.bytes.as_encoded_str())

    # Validation must fail: the child manifest's architecture doesn't match the list entry.
    with pytest.raises(MismatchManifestException):
        manifest_list.validate(retriever)
def store_tag_manifest(namespace, repo_name, tag_name, image_id):
    builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)

    # Reference any storage row that carries a content checksum, if one exists.
    storage_id_map = {}
    try:
        image_storage = (
            ImageStorage.select().where(~(ImageStorage.content_checksum >> None)).get()
        )
        builder.add_layer(image_storage.content_checksum, '{"id": "foo"}')
        storage_id_map[image_storage.content_checksum] = image_storage.id
    except ImageStorage.DoesNotExist:
        pass

    built = builder.build(docker_v2_signing_key)
    manifest_row, _ = model.tag.store_tag_manifest_for_testing(
        namespace, repo_name, tag_name, built, image_id, storage_id_map
    )
    return manifest_row
def test_validate_manifest_with_emoji(with_key):
    builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
    metadata = {
        "id": "someid",
        "author": "😱",
    }
    builder.add_layer("sha256:abcde", json.dumps(metadata, ensure_ascii=False))

    signed = builder.build(with_key, ensure_ascii=False)
    signed._validate()

    # Ensure the manifest can be reloaded.
    signed_bytes = signed.bytes.as_encoded_str()
    DockerSchema1Manifest(Bytes.for_string_or_unicode(signed_bytes))
def test_get_or_create_manifest_invalid_image(initialized_db):
    repository = get_repository("devtable", "simple")

    latest_tag = get_tag(repository, "latest")
    parsed = DockerSchema1Manifest(
        Bytes.for_string_or_unicode(latest_tag.manifest.manifest_bytes), validate=False
    )

    # Reference a parent image ID that does not exist.
    builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
    builder.add_layer(parsed.blob_digests[0], '{"id": "foo", "parent": "someinvalidimageid"}')
    sample_manifest_instance = builder.build(docker_v2_signing_key)

    # Creation should fail, yielding no manifest.
    created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
    assert created_manifest is None
def test_create_manifest_and_retarget_tag(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
    parsed = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()

    builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
    builder.add_layer(parsed.blob_digests[0], '{"id": "%s"}' % "someid")
    sample_manifest = builder.build(docker_v2_signing_key)
    assert sample_manifest is not None

    new_manifest, new_tag = registry_model.create_manifest_and_retarget_tag(
        repository_ref, sample_manifest, "anothertag", storage
    )
    assert new_manifest is not None
    assert new_tag is not None
    assert new_tag.name == "anothertag"
    assert new_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
def test_build_unencoded_unicode_manifest(with_key):
    builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
    builder.add_layer(
        "sha256:abcde",
        json.dumps({"id": "someid", "author": "Sômé guy",}, ensure_ascii=False),
    )

    built = builder.build(with_key, ensure_ascii=False)

    # Assert kid was created correctly
    # https://docs.docker.com/registry/spec/auth/jwt/
    if with_key is not None:
        assert len(built.signatures) == 1
        assert re.match(KID_FORMAT_REGEX, built.signatures[0]["header"]["jwk"]["kid"])

    built._validate()
def test_create_manifest_and_retarget_tag(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    latest_tag = registry_model.get_repo_tag(repository_ref, "latest", include_legacy_image=True)
    parsed = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()

    # New manifest reusing the existing blob, identified by the legacy image ID.
    builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
    builder.add_layer(
        parsed.blob_digests[0], '{"id": "%s"}' % latest_tag.legacy_image.docker_image_id
    )
    sample_manifest = builder.build(docker_v2_signing_key)
    assert sample_manifest is not None

    new_manifest, new_tag = registry_model.create_manifest_and_retarget_tag(
        repository_ref, sample_manifest, "anothertag", storage
    )
    assert new_manifest is not None
    assert new_tag is not None
    assert new_tag.name == "anothertag"
    assert new_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
def test_create_manifest_and_retarget_tag_with_labels(registry_model):
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    latest_tag = registry_model.get_repo_tag(repository_ref, "latest")
    parsed = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()

    # v1 metadata carrying an expiration label.
    metadata = {
        "id": "someid",
        "config": {
            "Labels": {
                "quay.expires-after": "2w",
            },
        },
    }

    builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
    builder.add_layer(parsed.blob_digests[0], json.dumps(metadata))
    sample_manifest = builder.build(docker_v2_signing_key)
    assert sample_manifest is not None

    new_manifest, new_tag = registry_model.create_manifest_and_retarget_tag(
        repository_ref, sample_manifest, "anothertag", storage
    )
    assert new_manifest is not None
    assert new_tag is not None
    assert new_tag.name == "anothertag"
    assert new_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict

    # Ensure the labels were applied.
    assert new_tag.lifetime_end_ms is not None

    # Create another tag and retarget it to an existing manifest; it should have an end date.
    # This is from a Quay's tag api, so it will not attempt to create a manifest first.
    yet_another_tag = registry_model.retarget_tag(
        repository_ref, "yet_another_tag", new_manifest, storage, docker_v2_signing_key
    )
    assert yet_another_tag.lifetime_end_ms is not None
def test_missing_link(initialized_db):
    """
    Tests for a corner case that could result in missing a link to a blob referenced by a
    manifest. The test exercises the case as follows:

    1) Push a manifest of a single layer with a Docker ID `FIRST_ID`, pointing
        to blob `FIRST_BLOB`. The database should contain the tag referencing the layer, with
        no changed ID and the blob not being GCed.

    2) Push a manifest of two layers:

        Layer 1: `FIRST_ID` with blob `SECOND_BLOB`: Will result in a new synthesized ID
        Layer 2: `SECOND_ID` with blob `THIRD_BLOB`: Will result in `SECOND_ID` pointing to the
                 `THIRD_BLOB`, with a parent pointing to the new synthesized ID's layer.

    3) Push a manifest of two layers:

        Layer 1: `THIRD_ID` with blob `FOURTH_BLOB`: Will result in a new `THIRD_ID` layer
        Layer 2: `FIRST_ID` with blob `THIRD_BLOB`: Since `FIRST_ID` already points to
                 `SECOND_BLOB`, this will synthesize a new ID. With the current bug, the
                 synthesized ID will match that of `SECOND_ID`, leaving `THIRD_ID` unlinked
                 and therefore, after a GC, missing `FOURTH_BLOB`.
    """
    with set_tag_expiration_policy('devtable', 0):
        location_name = storage.preferred_locations[0]
        location = database.ImageStorageLocation.get(name=location_name)

        # Create first blob.
        # BUGFIX: hashlib.sha256 requires a bytes-like object under Python 3; the digests
        # below were previously computed from str values, which raises TypeError.
        first_blob_sha = 'sha256:' + hashlib.sha256(b"FIRST").hexdigest()
        model.blob.store_blob_record_and_temp_link(
            ADMIN_ACCESS_USER, REPO, first_blob_sha, location, 0, 0, 0
        )

        # Push the first manifest.
        first_manifest = (
            DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, FIRST_TAG)
            .add_layer(first_blob_sha, '{"id": "first"}')
            .build(docker_v2_signing_key)
        )
        _write_manifest(ADMIN_ACCESS_USER, REPO, FIRST_TAG, first_manifest)

        # Delete all temp tags and perform GC.
        _perform_cleanup()

        # Ensure that the first blob still exists, along with the first tag.
        assert (
            model.blob.get_repo_blob_by_digest(ADMIN_ACCESS_USER, REPO, first_blob_sha)
            is not None
        )

        repository_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, REPO)
        found_tag = registry_model.get_repo_tag(
            repository_ref, FIRST_TAG, include_legacy_image=True
        )
        assert found_tag is not None
        assert found_tag.legacy_image.docker_image_id == 'first'

        # Create the second and third blobs.
        second_blob_sha = 'sha256:' + hashlib.sha256(b"SECOND").hexdigest()
        third_blob_sha = 'sha256:' + hashlib.sha256(b"THIRD").hexdigest()

        model.blob.store_blob_record_and_temp_link(
            ADMIN_ACCESS_USER, REPO, second_blob_sha, location, 0, 0, 0
        )
        model.blob.store_blob_record_and_temp_link(
            ADMIN_ACCESS_USER, REPO, third_blob_sha, location, 0, 0, 0
        )

        # Push the second manifest.
        second_manifest = (
            DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, SECOND_TAG)
            .add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
            .add_layer(second_blob_sha, '{"id": "first"}')
            .build(docker_v2_signing_key)
        )
        _write_manifest(ADMIN_ACCESS_USER, REPO, SECOND_TAG, second_manifest)

        # Delete all temp tags and perform GC.
        _perform_cleanup()

        # Ensure that the first and second blobs still exists, along with the second tag.
        assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
        assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
        assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None

        found_tag = registry_model.get_repo_tag(
            repository_ref, FIRST_TAG, include_legacy_image=True
        )
        assert found_tag is not None
        assert found_tag.legacy_image.docker_image_id == 'first'

        # Ensure the IDs have changed.
        found_tag = registry_model.get_repo_tag(
            repository_ref, SECOND_TAG, include_legacy_image=True
        )
        assert found_tag is not None
        assert found_tag.legacy_image.docker_image_id != 'second'

        # Create the fourth blob.
        fourth_blob_sha = 'sha256:' + hashlib.sha256(b"FOURTH").hexdigest()
        model.blob.store_blob_record_and_temp_link(
            ADMIN_ACCESS_USER, REPO, fourth_blob_sha, location, 0, 0, 0
        )

        # Push the third manifest.
        third_manifest = (
            DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, THIRD_TAG)
            .add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
            .add_layer(
                fourth_blob_sha, '{"id": "first"}'
            )  # Note the change in BLOB from the second manifest.
            .build(docker_v2_signing_key)
        )
        _write_manifest(ADMIN_ACCESS_USER, REPO, THIRD_TAG, third_manifest)

        # Delete all temp tags and perform GC.
        _perform_cleanup()

        # Ensure all blobs are present.
        assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
        assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
        assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None
        assert registry_model.get_repo_blob_by_digest(repository_ref, fourth_blob_sha) is not None

        # Ensure new synthesized IDs were created.
        second_tag = registry_model.get_repo_tag(
            repository_ref, SECOND_TAG, include_legacy_image=True
        )
        third_tag = registry_model.get_repo_tag(
            repository_ref, THIRD_TAG, include_legacy_image=True
        )
        assert second_tag.legacy_image.docker_image_id != third_tag.legacy_image.docker_image_id