def test_images_shared_storage(default_tag_policy, initialized_db):
    """
    Repository with two tags, both with the same shared storage.

    Deleting the first tag should delete the first image, but *not* its storage,
    since the second image still references it.
    """
    with assert_gc_integrity(expect_storage_removed=False):
        repo = create_repository()

        # Two distinct images sharing a single storage record.
        shared_storage = model.storage.create_v1_storage(storage.preferred_locations[0])
        image_one = Image.create(
            docker_image_id='i1', repository=repo, storage=shared_storage, ancestors='/'
        )
        image_two = Image.create(
            docker_image_id='i2', repository=repo, storage=shared_storage, ancestors='/'
        )

        namespace = repo.namespace_user.username
        store_tag_manifest(namespace, repo.name, 'first', image_one.docker_image_id)
        store_tag_manifest(namespace, repo.name, 'second', image_two.docker_image_id)

        # Deleting the first tag removes its image but must leave the shared storage.
        delete_tag(repo, 'first')
        assert_deleted(repo, 'i1')
        assert_not_deleted(repo, 'i2')
def _is_storage_orphaned(candidate_id):
    """
    Return whether the given candidate storage ID is orphaned.

    Must be executed under a transaction. A storage row is orphaned only when
    no ManifestBlob, legacy Image, or UploadedBlob row still references it.
    """
    with ensure_under_transaction():
        referencing = (
            (ManifestBlob, ManifestBlob.blob),
            (Image, Image.storage),
            (UploadedBlob, UploadedBlob.blob),
        )
        for model_cls, field in referencing:
            try:
                model_cls.get(field == candidate_id)
                return False
            except model_cls.DoesNotExist:
                pass

        return True
def test_images_shared_cas(default_tag_policy, initialized_db):
    """
    A repository with two tags, each pointing to its own image, where the images have
    image storage with the same *CAS path*, but *distinct records*.

    Deleting the first tag should delete the first image, and its storage record, but not
    the file in backing storage, as it shares its CAS path with the second record.
    """
    with assert_gc_integrity(expect_storage_removed=True):
        repository = create_repository()

        # Create two image storage records with the same content checksum.
        # BUG FIX: hashlib.sha256 requires bytes on Python 3; a str raised TypeError.
        content = b"hello world"
        digest = "sha256:" + hashlib.sha256(content).hexdigest()
        preferred = storage.preferred_locations[0]
        storage.put_content({preferred}, storage.blob_path(digest), content)

        is1 = database.ImageStorage.create(content_checksum=digest, uploading=False)
        is2 = database.ImageStorage.create(content_checksum=digest, uploading=False)

        location = database.ImageStorageLocation.get(name=preferred)

        database.ImageStoragePlacement.create(location=location, storage=is1)
        database.ImageStoragePlacement.create(location=location, storage=is2)

        # Ensure the CAS path exists.
        assert storage.exists({preferred}, storage.blob_path(digest))

        # Create two images in the repository, and two tags, each pointing to one of the storages.
        first_image = Image.create(
            docker_image_id="i1", repository=repository, storage=is1, ancestors="/"
        )
        second_image = Image.create(
            docker_image_id="i2", repository=repository, storage=is2, ancestors="/"
        )

        store_tag_manifest(
            repository.namespace_user.username,
            repository.name,
            "first",
            first_image.docker_image_id,
        )
        store_tag_manifest(
            repository.namespace_user.username,
            repository.name,
            "second",
            second_image.docker_image_id,
        )

        assert_not_deleted(repository, "i1", "i2")

        # Delete the first tag.
        delete_tag(repository, "first")
        assert_deleted(repository, "i1")
        assert_not_deleted(repository, "i2")

        # Ensure the CAS path still exists, since the second record shares it.
        assert storage.exists({preferred}, storage.blob_path(digest))
def _check_image_used(legacy_image_id):
    """
    Return True if the given legacy image row is still referenced by a manifest,
    a tag, or a child image; False otherwise.
    """
    assert legacy_image_id is not None

    with db_transaction():
        # An image is "used" if any of these reference tables points at it.
        references = (
            (ManifestLegacyImage, ManifestLegacyImage.image),
            (RepositoryTag, RepositoryTag.image),
            (Image, Image.parent),
        )
        for model_cls, field in references:
            try:
                model_cls.select().where(field == legacy_image_id).get()
                return True
            except model_cls.DoesNotExist:
                pass

        return False
def _garbage_collect_legacy_image(legacy_image_id, context): assert legacy_image_id is not None # Check if the image is referenced. if _check_image_used(legacy_image_id): return False # We have an unreferenced image. We can now delete it. # Grab any derived storage for the image. for derived in DerivedStorageForImage.select().where( DerivedStorageForImage.source_image == legacy_image_id): context.add_blob_id(derived.derivative_id) try: image = Image.select().where(Image.id == legacy_image_id).get() except Image.DoesNotExist: return False assert image.repository_id == context.repository.id # Add the image's blob to be GCed. context.add_blob_id(image.storage_id) # If the image has a parent ID, add the parent for GC. if image.parent_id is not None: context.add_legacy_image_id(image.parent_id) # Delete the image. with db_transaction(): if _check_image_used(legacy_image_id): return False try: image = Image.select().where(Image.id == legacy_image_id).get() except Image.DoesNotExist: return False assert image.id == legacy_image_id assert image.repository_id == context.repository.id # Delete any derived storage for the image. deleted_derived_storage = (DerivedStorageForImage.delete().where( DerivedStorageForImage.source_image == legacy_image_id).execute()) # Delete the image itself. image.delete_instance() context.mark_legacy_image_removed(image) gc_table_rows_deleted.labels(table="Image").inc() gc_table_rows_deleted.labels( table="DerivedStorageForImage").inc(deleted_derived_storage) if config.image_cleanup_callbacks: for callback in config.image_cleanup_callbacks: callback([image]) return True
def find_create_or_link_image(
    docker_image_id, repo_obj, username, translations, preferred_location
):
    """
    Return the image with the given docker image ID in the repository, creating it
    if necessary.

    When an image with the same docker image ID is visible to `username` in another
    repository, its storage is linked (copied by reference) instead of creating a
    brand-new storage. `translations` is a mutable old-id -> new-id map shared
    across the ancestry-translation recursion.
    """
    # First check for the image existing in the repository. If found, we simply return it.
    repo_image = get_repo_image(repo_obj.namespace_user.username, repo_obj.name, docker_image_id)
    if repo_image:
        return repo_image

    # We next check to see if there is an existing storage the new image can link to.
    existing_image_query = (
        Image.select(Image, ImageStorage)
        .distinct()
        .join(ImageStorage)
        .switch(Image)
        .join(Repository)
        .join(RepositoryPermission, JOIN.LEFT_OUTER)
        .switch(Repository)
        .join(Namespace, on=(Repository.namespace_user == Namespace.id))
        .where(Image.docker_image_id == docker_image_id)
    )

    # Restrict the candidates to repositories the user can actually read.
    existing_image_query = _basequery.filter_to_repos_for_user(
        existing_image_query, _namespace_id_for_username(username)
    )

    # If there is an existing image, we try to translate its ancestry and copy its storage.
    new_image = None
    try:
        logger.debug("Looking up existing image for ID: %s", docker_image_id)
        existing_image = existing_image_query.get()

        logger.debug("Existing image %s found for ID: %s", existing_image.id, docker_image_id)
        new_image = _find_or_link_image(
            existing_image, repo_obj, username, translations, preferred_location
        )
        if new_image:
            return new_image
    except Image.DoesNotExist:
        logger.debug("No existing image found for ID: %s", docker_image_id)

    # Otherwise, create a new storage directly.
    with db_transaction():
        # Final check for an existing image, under the transaction.
        repo_image = get_repo_image(
            repo_obj.namespace_user.username, repo_obj.name, docker_image_id
        )
        if repo_image:
            return repo_image

        logger.debug("Creating new storage for docker id: %s", docker_image_id)
        new_storage = storage.create_v1_storage(preferred_location)

        return Image.create(
            docker_image_id=docker_image_id,
            repository=repo_obj,
            storage=new_storage,
            ancestors="/"
        )
def get_image_with_storage_and_parent_base():
    """
    Base query selecting images joined with their storage, plus (via left-outer
    joins) each image's parent and the parent's storage, when present.
    """
    ParentImage = Image.alias()
    ParentStorage = ImageStorage.alias()

    query = Image.select(Image, ImageStorage, ParentImage, ParentStorage)
    query = query.join(ImageStorage).switch(Image)
    query = query.join(ParentImage, JOIN.LEFT_OUTER, on=(Image.parent == ParentImage.id))
    return query.join(
        ParentStorage, JOIN.LEFT_OUTER, on=(ParentStorage.id == ParentImage.storage)
    )
def _get_repo_tag_image(tag_name, include_storage, modifier):
    """
    Return the (alive) image referenced by the given tag name, optionally
    eagerly loading its storage. Raises DataModelException if no image matches.
    """
    if include_storage:
        base_query = (
            Image.select(Image, ImageStorage)
            .join(ImageStorage)
            .switch(Image)
            .join(RepositoryTag)
        )
    else:
        base_query = Image.select().join(RepositoryTag)

    images = _tag_alive(modifier(base_query.where(RepositoryTag.name == tag_name)))
    if not images:
        raise DataModelException("Unable to find image for tag.")

    return images[0]
def _find_or_link_image(existing_image, repo_obj, username, translations, preferred_location):
    """
    Copy `existing_image` (from another repository) into `repo_obj`, sharing its
    storage record and translating its ancestry into the target repository.

    Returns the new Image row, the already-present row if one exists, or None if
    the source image disappeared. Records the old-id -> new-id mapping in
    `translations`.
    """
    with db_transaction():
        # Check for an existing image, under the transaction, to make sure it doesn't already exist.
        repo_image = get_repo_image(
            repo_obj.namespace_user.username, repo_obj.name, existing_image.docker_image_id
        )
        if repo_image:
            return repo_image

        # Make sure the existing base image still exists.
        try:
            to_copy = Image.select().join(ImageStorage).where(Image.id == existing_image.id).get()

            msg = "Linking image to existing storage with docker id: %s and uuid: %s"
            logger.debug(msg, existing_image.docker_image_id, to_copy.storage.uuid)

            # Rewrite the ancestry path so each ancestor ID refers to an image in
            # the target repository (creating/linking ancestors as needed).
            new_image_ancestry = __translate_ancestry(
                to_copy.ancestors, translations, repo_obj, username, preferred_location
            )

            # Share the storage record rather than copying the blob.
            copied_storage = to_copy.storage

            # The parent is the last ID in the translated ancestry path, if any.
            translated_parent_id = None
            if new_image_ancestry != "/":
                translated_parent_id = int(new_image_ancestry.split("/")[-2])

            new_image = Image.create(
                docker_image_id=existing_image.docker_image_id,
                repository=repo_obj,
                storage=copied_storage,
                ancestors=new_image_ancestry,
                command=existing_image.command,
                created=existing_image.created,
                comment=existing_image.comment,
                v1_json_metadata=existing_image.v1_json_metadata,
                aggregate_size=existing_image.aggregate_size,
                parent=translated_parent_id,
                v1_checksum=existing_image.v1_checksum,
            )

            logger.debug("Storing translation %s -> %s", existing_image.id, new_image.id)
            translations[existing_image.id] = new_image.id
            return new_image
        except Image.DoesNotExist:
            return None
def set_image_storage_metadata(docker_image_id, namespace_name, repository_name, image_size,
                               uncompressed_size):
    """
    Sets metadata that is specific to the binary storage of the data, irrespective of how it is
    used in the layer tree.

    Returns the updated ImageStorage row. Raises DataModelException if image_size is None and
    InvalidImageException if no matching image exists.
    """
    if image_size is None:
        raise DataModelException('Empty image size field')

    try:
        image = (
            Image.select(Image, ImageStorage)
            .join(Repository)
            .join(Namespace, on=(Repository.namespace_user == Namespace.id))
            .switch(Image)
            .join(ImageStorage)
            .where(
                Repository.name == repository_name,
                Namespace.username == namespace_name,
                Image.docker_image_id == docker_image_id,
            )
            .get()
        )
    except Image.DoesNotExist:
        # BUG FIX: .get() on an Image query raises Image.DoesNotExist; the original
        # caught ImageStorage.DoesNotExist, so a missing image escaped unhandled.
        raise InvalidImageException('No image with specified id and repository')

    # We MUST do this here, it can't be done in the corresponding image call because the storage
    # has not yet been pushed
    image.aggregate_size = _basequery.calculate_image_aggregate_size(
        image.ancestors, image_size, image.parent
    )
    image.save()

    image.storage.image_size = image_size
    image.storage.uncompressed_size = uncompressed_size
    image.storage.save()
    return image.storage
def test_manifest_backfill_broken_tag(clear_rows, initialized_db):
    """
    Tests backfilling a broken tag.
    """
    # Remove existing tag manifests so the tag can be reused.
    TagManifestLabel.delete().execute()
    TagManifest.delete().execute()

    # Build a tag whose image references missing parent images.
    repo = model.repository.get_repository("devtable", "gargantuan")
    orphan_image = Image.create(
        docker_image_id="foo",
        repository=repo,
        ancestors="/348723847234/",
        storage=ImageStorage.get(),
    )
    orphan_tag = RepositoryTag.create(repository=repo, image=orphan_image, name="broken")

    # Backfill must succeed even though the manifest chain is broken.
    assert backfill_tag(orphan_tag)

    # Verify the backfilled manifest and its tag mappings.
    tag_manifest = TagManifest.get(tag=orphan_tag)
    mapping = TagManifestToManifest.get(tag_manifest=tag_manifest)
    manifest = mapping.manifest
    assert manifest.manifest_bytes == tag_manifest.json_data

    tag = TagToRepositoryTag.get(repository_tag=orphan_tag).tag
    assert tag.name == "broken"
    assert tag.manifest == manifest
def __translate_ancestry(old_ancestry, translations, repo_obj, username, preferred_location):
    """
    Rewrite an ancestry path ("/id1/id2/") so each ancestor ID refers to an
    image in `repo_obj`, creating or linking ancestors as needed. Caches
    old-id -> new-id mappings in `translations`.
    """
    if old_ancestry == "/":
        return "/"

    def translate_id(old_id, docker_image_id):
        logger.debug("Translating id: %s", old_id)
        if old_id not in translations:
            linked = find_create_or_link_image(
                docker_image_id, repo_obj, username, translations, preferred_location
            )
            translations[old_id] = linked.id
        return translations[old_id]

    ancestor_ids = [int(part) for part in old_ancestry.split("/")[1:-1]]

    # Resolve all ancestor docker image IDs in a single query.
    id_to_docker_id = {
        row.id: row.docker_image_id
        for row in Image.select(Image.id, Image.docker_image_id).where(Image.id << ancestor_ids)
    }

    # Translate the old images into new ones.
    translated = [str(translate_id(aid, id_to_docker_id[aid])) for aid in ancestor_ids]
    return "/%s/" % "/".join(translated)
def test_get_matching_tag_ids_images_filtered(initialized_db):
    # Restrict both images and tags to the "simple" repository.
    def filter_query(query):
        return query.join(Repository).where(Repository.name == "simple")

    images_query = (
        Image.select(Image, ImageStorage)
        .join(RepositoryTag)
        .switch(Image)
        .join(ImageStorage)
        .switch(Image)
    )
    filtered_images = filter_query(images_query)
    expected_tags_query = _tag_alive(filter_query(RepositoryTag.select()))

    pairs = [(image.docker_image_id, image.storage.uuid) for image in filtered_images]

    matching_tags = get_matching_tags_for_images(
        pairs, filter_images=filter_query, filter_tags=filter_query
    )

    expected_tag_ids = set([tag.id for tag in expected_tags_query])
    matching_tags_ids = set([tag.id for tag in matching_tags])

    # Ensure every alive tag was found.
    assert matching_tags_ids == expected_tag_ids
def set_secscan_status(image, indexed, version):
    """
    Record the security-scan status for the given image. Returns True if the row
    was actually updated (i.e. the status or engine version changed), else False.
    """
    # Only touch the row when something actually changes.
    changed = (Image.security_indexed_engine != version) | (Image.security_indexed != indexed)
    update = (
        Image.update(security_indexed=indexed, security_indexed_engine=version)
        .where(Image.id == image.id)
        .where(changed)
    )
    return update.execute() != 0
def backfill_replication():
    """
    Enqueue replication work for every image storage that is missing one of its
    required regional placements.

    Walks all images (deduplicating by storage UUID), compares the storage's
    existing placements against the namespace's required locations, and queues a
    replication job for any storage with missing locations.
    """
    encountered = set()
    images = (
        Image.select(Image, ImageStorage, Repository, User)
        .join(ImageStorage)
        .switch(Image)
        .join(Repository)
        .join(User)
    )

    for image in images:
        if image.storage.uuid in encountered:
            continue

        namespace = image.repository.namespace_user
        locations = model.user.get_region_locations(namespace)
        locations_required = locations | set(storage.default_locations)

        # BUG FIX: the original reassigned `query` — the very name being iterated by
        # the outer loop — inside the loop body; use a distinct name for the lookup.
        placements_query = (
            ImageStoragePlacement.select(ImageStoragePlacement, ImageStorageLocation)
            .where(ImageStoragePlacement.storage == image.storage)
            .join(ImageStorageLocation)
        )

        existing_locations = set([p.location.name for p in placements_query])
        locations_missing = locations_required - existing_locations
        if locations_missing:
            print("Enqueueing image storage %s to be replicated" % (image.storage.uuid))
            encountered.add(image.storage.uuid)

            if not image_replication_queue.alive([image.storage.uuid]):
                queue_storage_replication(
                    image.repository.namespace_user.username, image.storage
                )
def _populate_manifest_and_blobs(repository, manifest, storage_id_map, leaf_layer_id=None):
    """
    Resolve the manifest's legacy leaf image and the storage IDs of its local
    blobs, then delegate to populate_manifest. Raises DataModelException if the
    leaf image or any blob is missing.
    """
    leaf_layer_id = leaf_layer_id or manifest.leaf_layer_v1_image_id
    try:
        legacy_image = Image.get(
            Image.docker_image_id == leaf_layer_id, Image.repository == repository
        )
    except Image.DoesNotExist:
        raise DataModelException("Invalid image with id: %s" % leaf_layer_id)

    storage_ids = set()
    for blob_digest in manifest.local_blob_digests:
        image_storage_id = storage_id_map.get(blob_digest)
        if image_storage_id is None:
            logger.error("Missing blob for manifest `%s` in: %s", blob_digest, storage_id_map)
            raise DataModelException("Missing blob for manifest `%s`" % blob_digest)

        # Set insertion is idempotent, so duplicates are handled implicitly.
        storage_ids.add(image_storage_id)

    return populate_manifest(repository, manifest, legacy_image, storage_ids)
def test_create_temp_tag_deleted_repo(initialized_db):
    # A repository marked for deletion must refuse temporary hidden tags.
    repo = model.repository.get_repository("devtable", "simple")
    repo.state = RepositoryState.MARKED_FOR_DELETION
    repo.save()

    target_image = Image.get(repository=repo)
    result = model.tag.create_temporary_hidden_tag(repo, target_image, 10000000)
    assert result is None
def get_image(repo, docker_image_id):
    """
    Return the image (with its storage pre-loaded) having the given docker image
    ID in the repository, or None if none exists.
    """
    query = (
        Image.select(Image, ImageStorage)
        .join(ImageStorage)
        .where(Image.docker_image_id == docker_image_id, Image.repository == repo)
    )
    try:
        return query.get()
    except Image.DoesNotExist:
        return None
def test_load_security_information_api_responses(secscan_api_response, initialized_db):
    # Mark the manifest's legacy image as security-indexed.
    repository_ref = registry_model.lookup_repository("devtable", "simple")
    tag = registry_model.get_repo_tag(repository_ref, "latest", include_legacy_image=True)
    manifest = registry_model.get_manifest_for_tag(
        tag, backfill_if_necessary=True, include_legacy_image=True
    )
    set_secscan_status(Image.get(id=manifest.legacy_image._db_id), True, 3)

    # Stub out the legacy secscan API with the canned response.
    secscan = V2SecurityScanner(app, instance_keys, storage)
    secscan._legacy_secscan_api = mock.Mock()
    secscan._legacy_secscan_api.get_layer_data.return_value = secscan_api_response

    security_information = secscan.load_security_information(manifest).security_information
    assert isinstance(security_information, SecurityInformation)

    # The returned layer must mirror the API response's fields.
    response_layer = secscan_api_response["Layer"]
    layer = security_information.Layer
    assert layer.Name == response_layer.get("Name", "")
    assert layer.ParentName == response_layer.get("ParentName", "")
    assert layer.IndexedByVersion == response_layer.get("IndexedByVersion", None)
    assert len(layer.Features) == len(response_layer.get("Features", []))
def synthesize_v1_image(
    repo,
    image_storage_id,
    storage_image_size,
    docker_image_id,
    created_date_str,
    comment,
    command,
    v1_json_metadata,
    parent_image=None,
):
    """
    Find an existing image with this docker image id, and if none exists, write one with the
    specified metadata.

    Returns the existing or newly-created Image row. A concurrent insert of the same
    (docker_image_id, repo) pair is resolved by fetching the winner's row.
    """
    ancestors = "/"
    if parent_image is not None:
        ancestors = "{0}{1}/".format(parent_image.ancestors, parent_image.id)

    created = None
    if created_date_str is not None:
        try:
            created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
        except (ValueError, OverflowError, TypeError):
            # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            # dateutil's parse raises ValueError (incl. ParserError), OverflowError or
            # TypeError for unparseable input; leave `created` as None in those cases.
            pass

    # Get the aggregate size for the image.
    aggregate_size = _basequery.calculate_image_aggregate_size(
        ancestors, storage_image_size, parent_image
    )

    try:
        return Image.create(
            docker_image_id=docker_image_id,
            ancestors=ancestors,
            comment=comment,
            command=command,
            v1_json_metadata=v1_json_metadata,
            created=created,
            storage=image_storage_id,
            repository=repo,
            parent=parent_image,
            aggregate_size=aggregate_size,
        )
    except IntegrityError:
        # Another writer raced us; return the now-existing row.
        return Image.get(docker_image_id=docker_image_id, repository=repo)
def _get_dangling_storage_count():
    """
    Count ImageStorage rows referenced by no image, manifest blob, or derived
    storage record.
    """
    all_storage_ids = {row.id for row in ImageStorage.select()}

    # Union of every table that can hold a reference to a storage row.
    referenced = {image.storage_id for image in Image.select()}
    referenced |= {blob.blob_id for blob in ManifestBlob.select()}
    referenced |= {derived.derivative_id for derived in DerivedStorageForImage.select()}

    return len(all_storage_ids - referenced)
def _get_repository_images(namespace_name, repository_name, query_modifier):
    """
    Return the images of the given repository, with `query_modifier` applied to
    the base query before it is returned.
    """
    base_query = (
        Image.select()
        .join(Repository)
        .join(Namespace, on=(Repository.namespace_user == Namespace.id))
        .where(Repository.name == repository_name, Namespace.username == namespace_name)
    )
    return query_modifier(base_query)
def purge_repository(repo, force=False):
    """
    Completely delete all traces of the repository.

    Will return True upon complete success, and False upon partial or total failure. Garbage
    collection is incremental and repeatable, so this return value does not need to be checked or
    responded to.
    """
    # Purging is only allowed on repositories already marked for deletion,
    # unless explicitly forced.
    assert repo.state == RepositoryState.MARKED_FOR_DELETION or force

    # Delete the repository of all Appr-referenced entries.
    # Note that new-model Tag's must be deleted in *two* passes, as they can reference parent tags,
    # and MySQL is... particular... about such relationships when deleting.
    if repo.kind.name == "application":
        ApprTag.delete().where(ApprTag.repository == repo, ~(ApprTag.linked_tag >> None)).execute()
        ApprTag.delete().where(ApprTag.repository == repo).execute()
    else:
        # GC to remove the images and storage.
        _purge_repository_contents(repo)

    # Ensure there are no additional tags, manifests, images or blobs in the repository.
    assert ApprTag.select().where(ApprTag.repository == repo).count() == 0
    assert Tag.select().where(Tag.repository == repo).count() == 0
    assert RepositoryTag.select().where(RepositoryTag.repository == repo).count() == 0
    assert Manifest.select().where(Manifest.repository == repo).count() == 0
    assert ManifestBlob.select().where(ManifestBlob.repository == repo).count() == 0
    assert Image.select().where(Image.repository == repo).count() == 0

    # Delete any repository build triggers, builds, and any other large-ish reference tables for
    # the repository.
    _chunk_delete_all(repo, RepositoryPermission, force=force)
    _chunk_delete_all(repo, RepositoryBuild, force=force)
    _chunk_delete_all(repo, RepositoryBuildTrigger, force=force)
    _chunk_delete_all(repo, RepositoryActionCount, force=force)
    _chunk_delete_all(repo, Star, force=force)
    _chunk_delete_all(repo, AccessToken, force=force)
    _chunk_delete_all(repo, RepositoryNotification, force=force)
    _chunk_delete_all(repo, BlobUpload, force=force)
    _chunk_delete_all(repo, RepoMirrorConfig, force=force)
    _chunk_delete_all(repo, RepositoryAuthorizedEmail, force=force)

    # Delete any marker rows for the repository.
    DeletedRepository.delete().where(DeletedRepository.repository == repo).execute()

    # Delete the rest of the repository metadata.
    try:
        # Make sure the repository still exists.
        fetched = Repository.get(id=repo.id)
    except Repository.DoesNotExist:
        return False

    # Recursive delete removes remaining dependent rows in one pass.
    fetched.delete_instance(recursive=True, delete_nullable=False, force=force)
    return True
def get_image_with_storage(docker_image_id, storage_uuid):
    """
    Returns the image with the given docker image ID and storage uuid or None if none.
    """
    query = (
        Image.select(Image, ImageStorage)
        .join(ImageStorage)
        .where(Image.docker_image_id == docker_image_id, ImageStorage.uuid == storage_uuid)
    )
    try:
        return query.get()
    except Image.DoesNotExist:
        return None
def _purge_repository_contents(repo):
    """
    Purges all the contents of a repository, removing all of its tags, manifests and images.

    Each phase loops until a full pass finds nothing to delete, since collecting
    one batch can expose further collectable rows.
    """
    logger.debug('Purging repository %s', repo)

    # Purge via all the tags.
    while True:
        found = False
        for tags in _chunk_iterate_for_deletion(Tag.select().where(Tag.repository == repo)):
            logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
            found = True
            context = _GarbageCollectorContext(repo)
            for tag in tags:
                logger.debug('Deleting tag %s under repository %s', tag, repo)
                assert tag.repository_id == repo.id
                _purge_oci_tag(tag, context, allow_non_expired=True)

            _run_garbage_collection(context)

        if not found:
            break

    # TODO: remove this once we're fully on the OCI data model.
    while True:
        found = False
        repo_tag_query = RepositoryTag.select().where(RepositoryTag.repository == repo)
        for tags in _chunk_iterate_for_deletion(repo_tag_query):
            logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
            found = True
            context = _GarbageCollectorContext(repo)
            for tag in tags:
                logger.debug('Deleting tag %s under repository %s', tag, repo)
                assert tag.repository_id == repo.id
                _purge_pre_oci_tag(tag, context, allow_non_expired=True)

            _run_garbage_collection(context)

        if not found:
            break

    # Add all remaining images to a new context. We do this here to minimize the number of images
    # we need to load.
    while True:
        found_image = False
        image_context = _GarbageCollectorContext(repo)
        for image in Image.select().where(Image.repository == repo):
            found_image = True
            logger.debug('Deleting image %s under repository %s', image, repo)
            assert image.repository_id == repo.id
            image_context.add_legacy_image_id(image.id)

        _run_garbage_collection(image_context)

        if not found_image:
            break
def test_select_images_to_scan(self):
    # Bring every image up to the configured security index engine version.
    target_version = app.config["SECURITY_SCANNER_ENGINE_VERSION_TARGET"]
    Image.update(security_indexed_engine=target_version).execute()

    # At the current version nothing should be eligible for scanning.
    self.assertIsNone(model.image.get_min_id_for_sec_scan(target_version))
    self.assertTrue(len(model.image.get_images_eligible_for_scan(target_version)) == 0)

    # A higher engine version makes images eligible again.
    self.assertIsNotNone(model.image.get_min_id_for_sec_scan(target_version + 1))
    self.assertTrue(len(model.image.get_images_eligible_for_scan(target_version + 1)) > 0)
def get_repository_images_without_placements(repo_obj, with_ancestor=None):
    """
    Return the images (with storage) of the repository, without loading their
    storage placements. If `with_ancestor` is given, restrict to that image and
    its descendants.
    """
    query = (
        Image.select(Image, ImageStorage)
        .join(ImageStorage)
        .where(Image.repository == repo_obj)
    )

    if with_ancestor:
        # Match the ancestor itself plus anything whose ancestry path starts
        # with the ancestor's path.
        prefix = "%s%s/" % (with_ancestor.ancestors, with_ancestor.id)
        query = query.where(
            (Image.ancestors ** (prefix + "%")) | (Image.id == with_ancestor.id)
        )

    return query
def _temp_link_blob(repository_id, storage, link_expiration_s):
    """
    Note: Should *always* be called by a parent under a transaction.

    Creates a placeholder image in the repository pointing at the storage, then a
    temporary hidden tag referencing it so the blob survives GC until it expires.
    """
    # Random docker image ID for the placeholder; real v1 metadata replaces it later.
    placeholder_id = str(uuid4())
    placeholder = Image.create(
        storage=storage, docker_image_id=placeholder_id, repository=repository_id
    )
    tag.create_temporary_hidden_tag(repository_id, placeholder, link_expiration_s)
def test_purge_repository_storage_blob(default_tag_policy, initialized_db):
    """
    Purges every repository and verifies that a blob's backing file is removed from
    storage exactly when no other repository still references it.
    """
    with populate_storage_for_gc():
        expected_blobs_removed_from_storage = set()
        preferred = storage.preferred_locations[0]

        # Check that existing uploadedblobs have an object in storage.
        for repo in database.Repository.select().order_by(database.Repository.id):
            for uploadedblob in UploadedBlob.select().where(UploadedBlob.repository == repo):
                assert storage.exists(
                    {preferred}, storage.blob_path(uploadedblob.blob.content_checksum)
                )

        # Remove everything.
        for repo in database.Repository.select():
            for uploadedblob in UploadedBlob.select().where(UploadedBlob.repository == repo):
                # Check if only this repository is referencing the uploadedblob.
                # If so, the blob should be removed from storage.
                has_dependent_manifestblob = (
                    ManifestBlob.select()
                    .where(
                        ManifestBlob.blob == uploadedblob.blob,
                        ManifestBlob.repository != repo,
                    )
                    .count()
                )
                has_dependent_image = (
                    Image.select()
                    .where(
                        Image.storage == uploadedblob.blob,
                        Image.repository != repo,
                    )
                    .count()
                )
                # BUG FIX: the original compared the UploadedBlob *model class* against the
                # row (`UploadedBlob == uploadedblob`), which is not a valid peewee
                # predicate; the intent is to find other repositories sharing this blob.
                has_dependent_uploadedblobs = (
                    UploadedBlob.select()
                    .where(
                        UploadedBlob.blob == uploadedblob.blob,
                        UploadedBlob.repository != repo,
                    )
                    .count()
                )

                if (
                    not has_dependent_manifestblob
                    and not has_dependent_image
                    and not has_dependent_uploadedblobs
                ):
                    expected_blobs_removed_from_storage.add(uploadedblob.blob)

            assert model.gc.purge_repository(repo, force=True)

        for removed_blob_from_storage in expected_blobs_removed_from_storage:
            assert not storage.exists(
                {preferred}, storage.blob_path(removed_blob_from_storage.content_checksum)
            )
def test_get_matching_tag_ids_for_all_images(max_subqueries, max_image_lookup_count, initialized_db):
    # Exercise the chunked lookup paths by constraining the batching limits.
    with patch('data.model.tag._MAX_SUB_QUERIES', max_subqueries):
        with patch('data.model.tag._MAX_IMAGE_LOOKUP_COUNT', max_image_lookup_count):
            pairs = [
                (image.docker_image_id, image.storage.uuid)
                for image in Image.select(Image, ImageStorage).join(ImageStorage)
            ]

            expected_tags_ids = set([tag.id for tag in _tag_alive(RepositoryTag.select())])
            matching_tags_ids = set([tag.id for tag in get_matching_tags_for_images(pairs)])

            # Ensure every alive tag was found.
            assert matching_tags_ids == expected_tags_ids