def set_tag_end_ms(tag, end_ms):
    """
    Sets the end timestamp for a tag.

    Should only be called by change_tag_expiration or tests.
    """
    with db_transaction():
        updated = (Tag.update(lifetime_end_ms=end_ms)
                   .where(Tag.id == tag)
                   .where(Tag.lifetime_end_ms == tag.lifetime_end_ms)
                   .execute())
        if updated != 1:
            return (None, False)

        # TODO: Remove the linkage code once RepositoryTag is gone.
        try:
            old_style_tag = (TagToRepositoryTag
                             .select(TagToRepositoryTag, RepositoryTag)
                             .join(RepositoryTag)
                             .where(TagToRepositoryTag.tag == tag)
                             .get()).repository_tag

            old_style_tag.lifetime_end_ts = end_ms // 1000 if end_ms is not None else None
            old_style_tag.save()
        except TagToRepositoryTag.DoesNotExist:
            pass

        return (tag.lifetime_end_ms, True)

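# Hedged usage sketch (not part of the source above): one plausible way a caller such as
# change_tag_expiration could compute the new end timestamp and delegate to set_tag_end_ms.
# The helper name and the conversion shown are illustrative assumptions only.
def _example_change_tag_expiration(tag, expiration_datetime):
    """Illustrative only: convert a datetime (or None) to epoch milliseconds and apply it."""
    end_ms = None
    if expiration_datetime is not None:
        # Assumes a POSIX timestamp; real callers may clamp to configured min/max expiration.
        end_ms = int(expiration_datetime.timestamp() * 1000)

    # Returns (previous lifetime_end_ms, whether the update was applied).
    return set_tag_end_ms(tag, end_ms)
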
def batch_create_manifest_labels(self, manifest):
    """
    Returns a context manager for batch creation of labels on a manifest.

    Can raise InvalidLabelKeyException or InvalidMediaTypeException depending on the validation
    errors.
    """
    try:
        tag_manifest = database.TagManifest.get(id=manifest._db_id)
    except database.TagManifest.DoesNotExist:
        yield None
        return

    labels_to_add = []

    def add_label(key, value, source_type_name, media_type_name=None):
        labels_to_add.append(
            dict(key=key,
                 value=value,
                 source_type_name=source_type_name,
                 media_type_name=media_type_name))

    yield add_label

    # TODO: make this truly batch once we've fully transitioned to V2_2 and no longer need
    # the mapping tables.
    for label in labels_to_add:
        with db_transaction():
            # Create the label itself.
            model.label.create_manifest_label(tag_manifest, **label)

            # Apply any changes to the manifest that the label prescribes.
            apply_label_to_manifest(label, manifest, self)

def delete_manifest_label(label_uuid, manifest):
    """
    Deletes the manifest label on the tag manifest with the given ID.

    Returns the label deleted or None if none.
    """
    # Find the label itself.
    label = get_manifest_label(label_uuid, manifest)
    if label is None:
        return None

    if not label.source_type.mutable:
        raise DataModelException('Cannot delete immutable label')

    # Delete the mapping records and label.
    # TODO: Remove this code once the TagManifest table is gone.
    with db_transaction():
        (TagManifestLabelMap
         .delete()
         .where(TagManifestLabelMap.label == label)
         .execute())

        deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()
        if deleted_count != 1:
            logger.warning('More than a single label deleted for matching label %s', label_uuid)

        deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()
        if deleted_count != 1:
            logger.warning('More than a single label deleted for matching label %s', label_uuid)

        label.delete_instance(recursive=False)
        return label

def lookup_manifest(
    repository_id,
    manifest_digest,
    allow_dead=False,
    require_available=False,
    temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
):
    """
    Returns the manifest with the specified digest under the specified repository or None if none.

    If allow_dead is True, then manifests referenced by only dead tags will also be returned. If
    require_available is True, the manifest will be marked with a temporary tag to ensure it
    remains available.
    """
    if not require_available:
        return _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)

    with db_transaction():
        found = _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)
        if found is None:
            return None

        create_temporary_tag_if_necessary(found, temp_tag_expiration_sec)
        return found

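# Hedged usage sketch (not part of the source above): looking up a manifest while ensuring it
# stays alive long enough to be used. `repo_id` and `digest` are placeholder values.
def _example_lookup_for_push(repo_id, digest):
    # With require_available=True the lookup runs inside a transaction and attaches a
    # temporary tag, so a concurrent GC pass cannot collect the manifest before we use it.
    manifest = lookup_manifest(repo_id, digest, allow_dead=False, require_available=True)
    if manifest is None:
        # Not present (or only reachable via dead tags); the caller would create it instead.
        return None
    return manifest
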
def _delete_tag(tag, now_ms):
    """
    Deletes the given tag by marking it as expired.
    """
    now_ts = int(now_ms // 1000)

    with db_transaction():
        updated = (Tag.update(lifetime_end_ms=now_ms)
                   .where(Tag.id == tag.id, Tag.lifetime_end_ms == tag.lifetime_end_ms)
                   .execute())
        if updated != 1:
            return None

        # TODO: Remove the linkage code once RepositoryTag is gone.
        try:
            old_style_tag = (TagToRepositoryTag
                             .select(TagToRepositoryTag, RepositoryTag)
                             .join(RepositoryTag)
                             .where(TagToRepositoryTag.tag == tag)
                             .get()).repository_tag

            old_style_tag.lifetime_end_ts = now_ts
            old_style_tag.save()
        except TagToRepositoryTag.DoesNotExist:
            pass

        return tag

def test_manifest_iterator(initialized_db, set_secscan_config, index_status, indexer_state,
                           seconds, expect_zero):
    secscan = V4SecurityScanner(app, instance_keys, storage)

    for manifest in Manifest.select():
        with db_transaction():
            ManifestSecurityStatus.delete().where(
                ManifestSecurityStatus.manifest == manifest,
                ManifestSecurityStatus.repository == manifest.repository,
            ).execute()
            ManifestSecurityStatus.create(
                manifest=manifest,
                repository=manifest.repository,
                error_json={},
                index_status=index_status,
                indexer_hash="old hash",
                indexer_version=IndexerVersion.V4,
                last_indexed=datetime.utcnow() - timedelta(seconds=seconds),
                metadata_json={},
            )

    iterator = secscan._get_manifest_iterator(
        indexer_state,
        Manifest.select(fn.Min(Manifest.id)).scalar(),
        Manifest.select(fn.Max(Manifest.id)).scalar(),
    )

    count = 0
    for candidate, abt, num_remaining in iterator:
        count = count + 1

    if expect_zero:
        assert count == 0
    else:
        assert count != 0

def commit_to_blob(self, app_config, expected_digest=None):
    """
    Commits the blob upload to a blob under the repository.

    The resulting blob will be marked to not be GCed for some period of time (as configured by
    `committed_blob_expiration`).

    If expected_digest is specified, the content digest of the data uploaded for the blob is
    compared to that given and, if it does not match, a BlobDigestMismatchException is raised.
    The digest given must be of type `Digest` and not a string.
    """
    # Compare the content digest.
    if expected_digest is not None:
        self._validate_digest(expected_digest)

    # Finalize the storage.
    storage_already_existed = self._finalize_blob_storage(app_config)

    # Convert the upload to a blob.
    computed_digest_str = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state)

    with db_transaction():
        blob = registry_model.commit_blob_upload(
            self.blob_upload, computed_digest_str, self.settings.committed_blob_expiration)
        if blob is None:
            return None

    self.committed_blob = blob
    return blob

def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
    """
    Creates a label on the manifest with the given key and value.
    """
    try:
        tag_manifest = database.TagManifest.get(id=manifest._db_id)
    except database.TagManifest.DoesNotExist:
        return None

    label_data = dict(key=key,
                      value=value,
                      source_type_name=source_type_name,
                      media_type_name=media_type_name)

    with db_transaction():
        # Create the label itself.
        label = model.label.create_manifest_label(tag_manifest, key, value, source_type_name,
                                                  media_type_name)

        # Apply any changes to the manifest that the label prescribes.
        apply_label_to_manifest(label_data, manifest, self)

    return Label.for_label(label)

def create_temporary_tag_if_necessary(manifest, expiration_sec):
    """
    Creates a temporary tag pointing to the given manifest, with the given expiration in seconds,
    unless there is an existing tag that will keep the manifest around.
    """
    tag_name = "$temp-%s" % str(uuid.uuid4())
    now_ms = get_epoch_timestamp_ms()
    end_ms = now_ms + (expiration_sec * 1000)

    # Check if there is an existing tag on the manifest that won't expire within the
    # timeframe. If so, no need for a temporary tag.
    with db_transaction():
        try:
            (Tag.select()
             .where(
                 Tag.manifest == manifest,
                 (Tag.lifetime_end_ms >> None) | (Tag.lifetime_end_ms >= end_ms),
             )
             .get())
            return None
        except Tag.DoesNotExist:
            pass

        return Tag.create(
            name=tag_name,
            repository=manifest.repository_id,
            lifetime_start_ms=now_ms,
            lifetime_end_ms=end_ms,
            reversion=False,
            hidden=True,
            manifest=manifest,
            tag_kind=Tag.tag_kind.get_id("tag"),
        )

def create_app_release(repo, tag_name, manifest_data, digest, models_ref, force=False):
    """
    Create a new application release. This includes creating a new Tag, ManifestList,
    ManifestListManifest, Manifest and ManifestBlob.

    To deduplicate the ManifestList, the manifest_list_json is kept ordered by manifest.id.
    The insert point in the ManifestList is found by bisecting the sorted list of manifest ids.
    """
    ManifestList = models_ref.ManifestList
    ManifestListManifest = models_ref.ManifestListManifest
    Blob = models_ref.Blob
    ManifestBlob = models_ref.ManifestBlob

    with db_transaction():
        # Create/get the package manifest.
        manifest = manifest_model.get_or_create_manifest(manifest_data,
                                                         manifest_data['mediaType'],
                                                         models_ref)
        # Get the tag.
        tag = tag_model.get_or_initialize_tag(repo, tag_name, models_ref)

        if tag.manifest_list is None:
            tag.manifest_list = ManifestList(
                media_type=ManifestList.media_type.get_id(LIST_MEDIA_TYPE),
                schema_version=SCHEMA_VERSION,
                manifest_list_json=[],
            )
        elif tag_model.tag_media_type_exists(tag, manifest.media_type, models_ref):
            if force:
                delete_app_release(repo, tag_name, manifest.media_type.name, models_ref)
                return create_app_release(repo, tag_name, manifest_data, digest, models_ref,
                                          force=False)
            else:
                raise PackageAlreadyExists("package exists already")

        list_json = tag.manifest_list.manifest_list_json
        mlm_query = (ManifestListManifest
                     .select()
                     .where(ManifestListManifest.manifest_list == tag.manifest_list))
        list_manifest_ids = sorted([mlm.manifest_id for mlm in mlm_query])
        insert_point = bisect.bisect_left(list_manifest_ids, manifest.id)
        list_json.insert(insert_point, manifest.manifest_json)
        list_manifest_ids.insert(insert_point, manifest.id)

        manifestlist = manifest_list_model.get_or_create_manifest_list(list_json,
                                                                       LIST_MEDIA_TYPE,
                                                                       SCHEMA_VERSION,
                                                                       models_ref)
        manifest_list_model.create_manifestlistmanifest(manifestlist, list_manifest_ids,
                                                        list_json, models_ref)

        tag = tag_model.create_or_update_tag(repo, tag_name, models_ref,
                                             manifest_list=manifestlist, tag_kind="release")

        blob_digest = digest
        try:
            (ManifestBlob
             .select()
             .join(Blob)
             .where(ManifestBlob.manifest == manifest,
                    Blob.digest == _ensure_sha256_header(blob_digest))
             .get())
        except ManifestBlob.DoesNotExist:
            blob = blob_model.get_blob(blob_digest, models_ref)
            ManifestBlob.create(manifest=manifest, blob=blob)

        return tag

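# Worked example (illustrative): how the bisect-based insert point keeps manifest_list_json
# ordered by manifest id, which is what allows two releases with the same set of manifests to
# deduplicate to the same manifest list.
import bisect

existing_manifest_ids = [3, 7, 9]          # ids already referenced by the manifest list
new_manifest_id = 8
insert_point = bisect.bisect_left(existing_manifest_ids, new_manifest_id)
assert insert_point == 2                   # 8 slots between 7 and 9
# The JSON entry for the new manifest is inserted at the same index, so the JSON stays in
# manifest-id order regardless of the order releases were pushed in.
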
def create_manifest_label(tag_manifest, key, value, source_type_name, media_type_name=None):
    """
    Creates a new manifest label on a specific tag manifest.
    """
    if not key:
        raise InvalidLabelKeyException("Missing key on label")

    # Note that we don't prevent invalid label names coming from the manifest to be stored, as
    # Docker does not currently prevent them from being put into said manifests.
    if not validate_label_key(key) and source_type_name != "manifest":
        raise InvalidLabelKeyException("Label key `%s` is invalid or reserved" % key)

    # Find the matching media type. If none specified, we infer.
    if media_type_name is None:
        media_type_name = "text/plain"
        if is_json(value):
            media_type_name = "application/json"

    media_type_id = _get_media_type_id(media_type_name)
    if media_type_id is None:
        raise InvalidMediaTypeException()

    source_type_id = _get_label_source_type_id(source_type_name)

    with db_transaction():
        label = Label.create(key=key, value=value, source_type=source_type_id,
                             media_type=media_type_id)
        tag_manifest_label = TagManifestLabel.create(annotated=tag_manifest, label=label,
                                                     repository=tag_manifest.tag.repository)
        try:
            mapping_row = TagManifestToManifest.get(tag_manifest=tag_manifest)
            if mapping_row.manifest:
                manifest_label = ManifestLabel.create(
                    manifest=mapping_row.manifest,
                    label=label,
                    repository=tag_manifest.tag.repository,
                )
                TagManifestLabelMap.create(
                    manifest_label=manifest_label,
                    tag_manifest_label=tag_manifest_label,
                    label=label,
                    manifest=mapping_row.manifest,
                    tag_manifest=tag_manifest,
                )
        except TagManifestToManifest.DoesNotExist:
            pass

    return label

def delete_members_not_present(team, member_id_set):
    """
    Deletes all members of the given team that are not found in the member ID set.
    """
    with db_transaction():
        user_ids = set([u.id for u in list_team_users(team)])
        to_delete = list(user_ids - member_id_set)
        if to_delete:
            query = TeamMember.delete().where(TeamMember.team == team,
                                              TeamMember.user << to_delete)
            return query.execute()

    return 0

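# Hedged usage sketch (not part of the source above): a team-sync style caller that removes
# members no longer present upstream. `team` and `wanted_users` are placeholders.
def _example_sync_team_members(team, wanted_users):
    # wanted_users is assumed to be an iterable of user rows that should remain on the team.
    member_id_set = {u.id for u in wanted_users}
    removed = delete_members_not_present(team, member_id_set)
    return removed  # number of TeamMember rows deleted (0 if nothing to remove)
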
def enable_mirroring_for_repository(
    repository,
    root_rule,
    internal_robot,
    external_reference,
    sync_interval,
    external_registry_username=None,
    external_registry_password=None,
    external_registry_config=None,
    is_enabled=True,
    sync_start_date=None,
):
    """
    Create a RepoMirrorConfig and set the Repository to the MIRROR state.
    """
    assert internal_robot.robot

    namespace, _ = parse_robot_username(internal_robot.username)
    if namespace != repository.namespace_user.username:
        raise DataModelException("Cannot use robot for mirroring")

    with db_transaction():
        # Create the RepoMirrorConfig.
        try:
            username = (
                DecryptedValue(external_registry_username) if external_registry_username else None
            )
            password = (
                DecryptedValue(external_registry_password) if external_registry_password else None
            )
            mirror = RepoMirrorConfig.create(
                repository=repository,
                root_rule=root_rule,
                is_enabled=is_enabled,
                internal_robot=internal_robot,
                external_reference=external_reference,
                external_registry_username=username,
                external_registry_password=password,
                external_registry_config=external_registry_config or {},
                sync_interval=sync_interval,
                sync_start_date=sync_start_date or datetime.utcnow(),
            )
        except IntegrityError:
            return RepoMirrorConfig.get(repository=repository)

        # Change Repository state to mirroring mode as needed.
        if repository.state != RepositoryState.MIRROR:
            query = Repository.update(state=RepositoryState.MIRROR).where(
                Repository.id == repository.id
            )
            if not query.execute():
                raise DataModelException("Could not change the state of the repository")

        return mirror

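# Hedged usage sketch (not part of the source above): enabling mirroring is effectively
# idempotent per repository -- a second call hits the IntegrityError branch and returns the
# existing RepoMirrorConfig instead of creating a duplicate. All values below are placeholders.
def _example_enable_mirroring(repository, rule, robot):
    config = enable_mirroring_for_repository(
        repository,
        root_rule=rule,
        internal_robot=robot,
        external_reference="registry.example.com/library/app",
        sync_interval=3600,  # assumed to be seconds between syncs
    )
    return config
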
def _get_manifest_id(repositorytag):
    repository_tag_datatype = TagDataType.for_repository_tag(repositorytag)

    # Retrieve the TagManifest for the RepositoryTag, backfilling if necessary.
    with db_transaction():
        manifest_datatype = None

        try:
            manifest_datatype = pre_oci_model.get_manifest_for_tag(
                repository_tag_datatype, backfill_if_necessary=True)
        except MalformedSchema1Manifest:
            logger.exception('Error backfilling manifest for tag `%s`', repositorytag.id)

        if manifest_datatype is None:
            logger.error('Could not load or backfill manifest for tag `%s`', repositorytag.id)

            # Create a broken manifest for the tag.
            tag_manifest = TagManifest.create(tag=repositorytag,
                                              digest='BROKEN-%s' % repositorytag.id,
                                              json_data='{}')
        else:
            # Retrieve the new-style Manifest for the TagManifest, if any.
            try:
                tag_manifest = TagManifest.get(id=manifest_datatype._db_id)
            except TagManifest.DoesNotExist:
                logger.exception('Could not find tag manifest')
                return None

    try:
        found = TagManifestToManifest.get(tag_manifest=tag_manifest).manifest

        # Verify that the new-style manifest has the same contents as the old-style manifest.
        # If not, update and then return. This is an extra check put in place to ensure unicode
        # manifests have been correctly copied.
        if found.manifest_bytes != tag_manifest.json_data:
            logger.warning('Fixing manifest `%s`', found.id)
            found.manifest_bytes = tag_manifest.json_data
            found.save()

        return found.id
    except TagManifestToManifest.DoesNotExist:
        # Could not find the new style manifest, so backfill.
        _backfill_manifest(tag_manifest)

    # Try to retrieve the manifest again, since we've performed a backfill.
    try:
        return TagManifestToManifest.get(tag_manifest=tag_manifest).manifest_id
    except TagManifestToManifest.DoesNotExist:
        return None

def get_or_create_manifest(manifest_json, media_type_name, models_ref):
    Manifest = models_ref.Manifest

    digest = _digest(manifest_json)
    try:
        manifest = get_manifest_query(digest, media_type_name, models_ref).get()
    except Manifest.DoesNotExist:
        with db_transaction():
            manifest = Manifest.create(
                digest=digest,
                manifest_json=manifest_json,
                media_type=Manifest.media_type.get_id(media_type_name),
            )

    return manifest

def mark_manifest_unsupported(manifest):
    with db_transaction():
        ManifestSecurityStatus.delete().where(
            ManifestSecurityStatus.manifest == manifest._db_id,
            ManifestSecurityStatus.repository == manifest.repository._db_id,
        ).execute()
        ManifestSecurityStatus.create(
            manifest=manifest._db_id,
            repository=manifest.repository._db_id,
            index_status=IndexStatus.MANIFEST_UNSUPPORTED,
            indexer_hash="none",
            indexer_version=IndexerVersion.V4,
            metadata_json={},
        )

def delete_tags_for_manifest(manifest):
    """
    Deletes all tags pointing to the given manifest.

    Returns the list of tags deleted.
    """
    query = Tag.select().where(Tag.manifest == manifest)
    query = filter_to_alive_tags(query)
    query = filter_to_visible_tags(query)

    tags = list(query)
    now_ms = get_epoch_timestamp_ms()

    with db_transaction():
        for tag in tags:
            _delete_tag(tag, now_ms)

    return tags

def create_manifest_label(manifest_id, key, value, source_type_name, media_type_name=None):
    """
    Creates a new manifest label on a specific tag manifest.
    """
    if not key:
        raise InvalidLabelKeyException("Missing key on label")

    # Note that we don't prevent invalid label names coming from the manifest to be stored, as
    # Docker does not currently prevent them from being put into said manifests.
    if source_type_name != "manifest" and not validate_label_key(key):
        raise InvalidLabelKeyException("Key `%s` is invalid or reserved" % key)

    # Find the matching media type. If none specified, we infer.
    if media_type_name is None:
        media_type_name = "text/plain"
        if is_json(value):
            media_type_name = "application/json"

    try:
        media_type_id = Label.media_type.get_id(media_type_name)
    except MediaType.DoesNotExist:
        raise InvalidMediaTypeException()

    source_type_id = Label.source_type.get_id(source_type_name)

    # Ensure the manifest exists.
    try:
        manifest = (
            Manifest.select(Manifest, Repository)
            .join(Repository)
            .where(Manifest.id == manifest_id)
            .get()
        )
    except Manifest.DoesNotExist:
        return None

    repository = manifest.repository

    with db_transaction():
        label = Label.create(
            key=key, value=value, source_type=source_type_id, media_type=media_type_id
        )
        manifest_label = ManifestLabel.create(
            manifest=manifest_id, label=label, repository=repository
        )

    return label

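# Hedged usage sketch (not part of the source above): attaching a user-supplied label to a
# manifest row. The key/value are placeholders and "api" is assumed to be a valid label source
# type; the media type is inferred from the value because none is passed explicitly.
def _example_add_expiry_label(manifest_id):
    label = create_manifest_label(manifest_id, "quay.expires-after", "2w", "api")
    if label is None:
        # The manifest row no longer exists.
        return None
    return label
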
def backfill_tag(repositorytag):
    logger.info("Backfilling tag %s", repositorytag.id)

    # Ensure that a mapping row doesn't already exist. If it does, nothing more to do.
    if lookup_map_row(repositorytag):
        return False

    # Grab the manifest for the RepositoryTag, backfilling as necessary.
    manifest_id = _get_manifest_id(repositorytag)
    if manifest_id is None:
        return True

    lifetime_start_ms = (
        repositorytag.lifetime_start_ts * 1000
        if repositorytag.lifetime_start_ts is not None
        else None
    )
    lifetime_end_ms = (
        repositorytag.lifetime_end_ts * 1000
        if repositorytag.lifetime_end_ts is not None
        else None
    )

    # Create the new Tag.
    with db_transaction():
        if lookup_map_row(repositorytag):
            return False

        try:
            created = Tag.create(
                name=repositorytag.name,
                repository=repositorytag.repository,
                lifetime_start_ms=lifetime_start_ms,
                lifetime_end_ms=lifetime_end_ms,
                reversion=repositorytag.reversion,
                manifest=manifest_id,
                tag_kind=Tag.tag_kind.get_id("tag"),
            )

            TagToRepositoryTag.create(
                tag=created, repository_tag=repositorytag, repository=repositorytag.repository
            )
        except IntegrityError:
            logger.exception("Could not create tag for repo tag `%s`", repositorytag.id)
            return False

    logger.info("Backfilled tag %s", repositorytag.id)
    return True

def get_or_create_manifest_list(manifest_list_json, media_type_name, schema_version, models_ref):
    ManifestList = models_ref.ManifestList

    digest = _digest(manifest_list_json)
    media_type_id = ManifestList.media_type.get_id(media_type_name)

    try:
        return get_manifest_list(digest, models_ref)
    except ManifestList.DoesNotExist:
        with db_transaction():
            manifestlist = ManifestList.create(digest=digest,
                                               manifest_list_json=manifest_list_json,
                                               schema_version=schema_version,
                                               media_type=media_type_id)
        return manifestlist

def backfill_label(tag_manifest_label):
    logger.info("Backfilling label %s", tag_manifest_label.id)

    # Ensure that a mapping row doesn't already exist. If it does, we've been preempted.
    if lookup_map_row(tag_manifest_label):
        return False

    # Ensure the tag manifest has been backfilled into the manifest table.
    try:
        tmt = TagManifestToManifest.get(tag_manifest=tag_manifest_label.annotated)
    except TagManifestToManifest.DoesNotExist:
        # We'll come back to this later.
        logger.debug(
            "Tag Manifest %s for label %s has not yet been backfilled",
            tag_manifest_label.annotated.id,
            tag_manifest_label.id,
        )
        return True

    repository = tag_manifest_label.repository

    # Create the new mapping entry and label.
    with db_transaction():
        if lookup_map_row(tag_manifest_label):
            return False

        label = tag_manifest_label.label
        if tmt.manifest:
            try:
                manifest_label = ManifestLabel.create(manifest=tmt.manifest, label=label,
                                                      repository=repository)
                TagManifestLabelMap.create(
                    manifest_label=manifest_label,
                    tag_manifest_label=tag_manifest_label,
                    label=label,
                    manifest=tmt.manifest,
                    tag_manifest=tag_manifest_label.annotated,
                )
            except IntegrityError:
                return False

    logger.info("Backfilled label %s", tag_manifest_label.id)
    return True

def delete_app_release(repo, tag_name, media_type, models_ref):
    """
    Terminate a Tag/media-type pair.

    Finds the corresponding tag/manifest and removes the manifest from the manifest list:
      1. the current tag is terminated (in all cases)
      2. if the resulting manifest list is not empty, a new tag is created for it
    """
    ManifestListManifest = models_ref.ManifestListManifest
    manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name

    media_type_id = ManifestListManifest.media_type.get_id(manifest_media_type(media_type))

    with db_transaction():
        tag = tag_model.get_tag(repo, tag_name, models_ref)
        manifest_list = tag.manifest_list
        list_json = manifest_list.manifest_list_json
        mlm_query = ManifestListManifest.select().where(
            ManifestListManifest.manifest_list == tag.manifest_list
        )
        list_manifest_ids = sorted([mlm.manifest_id for mlm in mlm_query])
        manifestlistmanifest = (
            getattr(tag.manifest_list, manifestlistmanifest_set_name)
            .where(ManifestListManifest.media_type == media_type_id)
            .get()
        )
        index = list_manifest_ids.index(manifestlistmanifest.manifest_id)
        list_manifest_ids.pop(index)
        list_json.pop(index)

        if not list_json:
            tag.lifetime_end = get_epoch_timestamp()
            tag.save()
        else:
            manifestlist = manifest_list_model.get_or_create_manifest_list(
                list_json, LIST_MEDIA_TYPE, SCHEMA_VERSION, models_ref
            )
            manifest_list_model.create_manifestlistmanifest(
                manifestlist, list_manifest_ids, list_json, models_ref
            )
            tag = tag_model.create_or_update_tag(
                repo, tag_name, models_ref, manifest_list=manifestlist, tag_kind="release"
            )

        return tag

def assign_layer_blob(self, layer, blob, computed_checksums):
    """
    Assigns a blob to a layer.
    """
    assert blob
    assert not blob.uploading

    repo_image = model.image.get_image_by_db_id(layer.db_id)
    if repo_image is None:
        return None

    with db_transaction():
        existing_storage = repo_image.storage
        repo_image.storage = blob._db_id
        repo_image.save()

        if existing_storage.uploading:
            self._builder_state.temp_storages.append(existing_storage.id)

    self._builder_state.checksums[layer.layer_id] = computed_checksums
    self._save_to_session()
    return True

def _create_manifest_with_temp_tag(
    self,
    repository_ref: RepositoryReference,
    manifest: ManifestInterface,
    manifest_ref: str | None = None,
) -> tuple[Manifest | None, Tag | None]:
    with db_disallow_replica_use():
        with db_transaction():
            db_manifest = oci.manifest.create_manifest(repository_ref.id, manifest)
            self._recalculate_repository_size(repository_ref)

            expiration = self._config.expiration_s or None
            tag = Tag.for_tag(
                oci.tag.create_temporary_tag_if_necessary(db_manifest, expiration),
                self._legacy_image_id_handler,
            )

            wrapped_manifest = Manifest.for_manifest(db_manifest, self._legacy_image_id_handler)
            if not manifest.is_manifest_list:
                self._create_placeholder_blobs(manifest, db_manifest.id, repository_ref.id)
                return wrapped_manifest, tag

            manifests_to_connect = []
            for child in manifest.child_manifests(content_retriever=None):
                m = oci.manifest.lookup_manifest(repository_ref.id, child.digest)
                if m is None:
                    m = oci.manifest.create_manifest(repository_ref.id, child)
                manifests_to_connect.append(m)

            oci.manifest.connect_manifests(manifests_to_connect, db_manifest, repository_ref.id)
            for db_manifest in manifests_to_connect:
                oci.tag.create_temporary_tag_if_necessary(db_manifest, expiration)

            return wrapped_manifest, tag

def start_layer(
    self, layer_id, v1_metadata_string, location_name, calling_user, temp_tag_expiration
):
    """
    Starts a new layer with the given ID to be placed into a manifest.

    Returns the layer started or None if an error occurred.
    """
    # Ensure the repository still exists.
    repository = model.repository.lookup_repository(self._repository_ref._db_id)
    if repository is None:
        return None

    namespace_name = repository.namespace_user.username
    repo_name = repository.name

    try:
        v1_metadata = json.loads(v1_metadata_string)
    except ValueError:
        logger.exception(
            "Exception when trying to parse V1 metadata JSON for layer %s", layer_id
        )
        return None
    except TypeError:
        logger.exception(
            "Exception when trying to parse V1 metadata JSON for layer %s", layer_id
        )
        return None

    # Sanity check that the ID matches the v1 metadata.
    if layer_id != v1_metadata["id"]:
        return None

    # Ensure the parent already exists in the repository.
    parent_id = v1_metadata.get("parent", None)
    parent_image = None

    if parent_id is not None:
        parent_image = model.image.get_repo_image(namespace_name, repo_name, parent_id)
        if parent_image is None:
            return None

    # Check to see if this layer already exists in the repository. If so, we can skip the
    # creation.
    existing_image = registry_model.get_legacy_image(self._repository_ref, layer_id)
    if existing_image is not None:
        self._builder_state.images[layer_id] = existing_image.id
        self._save_to_session()
        return ManifestLayer(layer_id, v1_metadata_string, existing_image.id)

    with db_transaction():
        # Otherwise, create a new legacy image and point a temporary tag at it.
        created = model.image.find_create_or_link_image(
            layer_id, repository, calling_user, {}, location_name
        )
        temp_tag_name = model.tag.create_temporary_hidden_tag(
            repository, created, temp_tag_expiration
        )
        if temp_tag_name is None:
            return None

        # Save its V1 metadata.
        command_list = v1_metadata.get("container_config", {}).get("Cmd", None)
        command = json.dumps(command_list) if command_list else None

        model.image.set_image_metadata(
            layer_id,
            namespace_name,
            repo_name,
            v1_metadata.get("created"),
            v1_metadata.get("comment"),
            command,
            v1_metadata_string,
            parent=parent_image,
        )

    # Save the changes to the builder.
    self._builder_state.images[layer_id] = created.id
    self._save_to_session()
    return ManifestLayer(layer_id, v1_metadata_string, created.id)

def _create_manifest(
    repository_id,
    manifest_interface_instance,
    storage,
    temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
    for_tagging=False,
    raise_on_error=False,
    retriever=None,
):
    # Validate the manifest.
    retriever = retriever or RepositoryContentRetriever.for_repository(repository_id, storage)
    try:
        manifest_interface_instance.validate(retriever)
    except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError) as ex:
        logger.exception("Could not validate manifest `%s`", manifest_interface_instance.digest)
        if raise_on_error:
            raise CreateManifestException(str(ex))

        return None

    # Load, parse and get/create the child manifests, if any.
    child_manifest_refs = manifest_interface_instance.child_manifests(retriever)
    child_manifest_rows = {}
    child_manifest_label_dicts = []

    if child_manifest_refs is not None:
        for child_manifest_ref in child_manifest_refs:
            # Load and parse the child manifest.
            try:
                child_manifest = child_manifest_ref.manifest_obj
            except (
                ManifestException,
                MalformedSchema2ManifestList,
                BlobDoesNotExist,
                IOError,
            ) as ex:
                logger.exception(
                    "Could not load manifest list for manifest `%s`",
                    manifest_interface_instance.digest,
                )
                if raise_on_error:
                    raise CreateManifestException(str(ex))

                return None

            # Retrieve its labels.
            labels = child_manifest.get_manifest_labels(retriever)
            if labels is None:
                if raise_on_error:
                    raise CreateManifestException("Unable to retrieve manifest labels")

                logger.exception("Could not load manifest labels for child manifest")
                return None

            # Get/create the child manifest in the database.
            child_manifest_info = get_or_create_manifest(
                repository_id, child_manifest, storage, raise_on_error=raise_on_error
            )
            if child_manifest_info is None:
                if raise_on_error:
                    raise CreateManifestException("Unable to retrieve child manifest")

                logger.error("Could not get/create child manifest")
                return None

            child_manifest_rows[child_manifest_info.manifest.digest] = child_manifest_info.manifest
            child_manifest_label_dicts.append(labels)

    # Build the map from required blob digests to the blob objects.
    blob_map = _build_blob_map(
        repository_id,
        manifest_interface_instance,
        retriever,
        storage,
        raise_on_error,
        require_empty_layer=False,
    )
    if blob_map is None:
        return None

    # Create the manifest and its blobs.
    media_type = Manifest.media_type.get_id(manifest_interface_instance.media_type)
    storage_ids = {storage.id for storage in list(blob_map.values())}

    # Check for the manifest, in case it was created since we checked earlier.
    try:
        manifest = Manifest.get(repository=repository_id, digest=manifest_interface_instance.digest)
        return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
    except Manifest.DoesNotExist:
        pass

    try:
        with db_transaction():
            # Create the manifest.
            try:
                manifest = Manifest.create(
                    repository=repository_id,
                    digest=manifest_interface_instance.digest,
                    media_type=media_type,
                    manifest_bytes=manifest_interface_instance.bytes.as_encoded_str(),
                    config_media_type=manifest_interface_instance.config_media_type,
                    layers_compressed_size=manifest_interface_instance.layers_compressed_size,
                )
            except IntegrityError as ie:
                # NOTE: An IntegrityError means (barring a bug) that the manifest was created by
                # another caller while we were attempting to create it. Since we need to return
                # the manifest, we raise a specialized exception here to break out of the
                # transaction so we can retrieve it.
                raise _ManifestAlreadyExists(ie)

            # Insert the blobs.
            blobs_to_insert = [
                dict(manifest=manifest, repository=repository_id, blob=storage_id)
                for storage_id in storage_ids
            ]
            if blobs_to_insert:
                try:
                    ManifestBlob.insert_many(blobs_to_insert).execute()
                except IntegrityError as ie:
                    raise _ManifestAlreadyExists(ie)

            # Insert the manifest child rows (if applicable).
            if child_manifest_rows:
                children_to_insert = [
                    dict(manifest=manifest, child_manifest=child_manifest, repository=repository_id)
                    for child_manifest in list(child_manifest_rows.values())
                ]
                try:
                    ManifestChild.insert_many(children_to_insert).execute()
                except IntegrityError as ie:
                    raise _ManifestAlreadyExists(ie)

            # If this manifest is being created not for immediate tagging, add a temporary tag to the
            # manifest to ensure it isn't being GCed. If the manifest *is* for tagging, then since we're
            # creating a new one here, it cannot be GCed (since it isn't referenced by anything yet), so
            # its safe to elide the temp tag operation. If we ever change GC code to collect *all* manifests
            # in a repository for GC, then we will have to reevaluate this optimization at that time.
            if not for_tagging:
                create_temporary_tag_if_necessary(manifest, temp_tag_expiration_sec)

        # Define the labels for the manifest (if any).
        # TODO: Once the old data model is gone, turn this into a batch operation and make the label
        # application to the manifest occur under the transaction.
        labels = manifest_interface_instance.get_manifest_labels(retriever)
        if labels:
            for key, value in labels.items():
                # NOTE: There can technically be empty label keys via Dockerfile's. We ignore any
                # such `labels`, as they don't really mean anything.
                if not key:
                    continue

                media_type = "application/json" if is_json(value) else "text/plain"
                create_manifest_label(manifest, key, value, "manifest", media_type)

        # Return the dictionary of labels to apply (i.e. those labels that cause an action to be taken
        # on the manifest or its resulting tags). We only return those labels either defined on
        # the manifest or shared amongst all the child manifests. We intersect amongst all child manifests
        # to ensure that any action performed is defined in all manifests.
        labels_to_apply = labels or {}
        if child_manifest_label_dicts:
            labels_to_apply = child_manifest_label_dicts[0].items()
            for child_manifest_label_dict in child_manifest_label_dicts[1:]:
                # Intersect the key+values of the labels to ensure we get the exact same result
                # for all the child manifests.
                labels_to_apply = labels_to_apply & child_manifest_label_dict.items()

            labels_to_apply = dict(labels_to_apply)

        return CreatedManifest(manifest=manifest, newly_created=True, labels_to_apply=labels_to_apply)
    except _ManifestAlreadyExists as mae:
        try:
            manifest = Manifest.get(repository=repository_id, digest=manifest_interface_instance.digest)
        except Manifest.DoesNotExist:
            # NOTE: If we've reached this point, then somehow we had an IntegrityError without it
            # being due to a duplicate manifest. We therefore log the error.
            logger.error(
                "Got integrity error when trying to create manifest: %s", mae.internal_exception
            )
            if raise_on_error:
                raise CreateManifestException(
                    "Attempt to create an invalid manifest. Please report this issue."
                )

            return None

        return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)

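# Worked example (illustrative): how the label intersection above behaves for a manifest list
# with two child manifests. Only key/value pairs present in *every* child survive.
example_child_label_dicts = [
    {"quay.expires-after": "2w", "maintainer": "team-a"},
    {"quay.expires-after": "2w", "maintainer": "team-b"},
]
example_labels_to_apply = example_child_label_dicts[0].items()
for child_label_dict in example_child_label_dicts[1:]:
    # dict.items() views support set intersection on (key, value) pairs.
    example_labels_to_apply = example_labels_to_apply & child_label_dict.items()
assert dict(example_labels_to_apply) == {"quay.expires-after": "2w"}
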
def _create_manifest(
    repository_id,
    manifest_interface_instance,
    storage,
    temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
    for_tagging=False,
    raise_on_error=False,
    retriever=None,
):
    # Validate the manifest.
    retriever = retriever or RepositoryContentRetriever.for_repository(repository_id, storage)
    try:
        manifest_interface_instance.validate(retriever)
    except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError) as ex:
        logger.exception("Could not validate manifest `%s`", manifest_interface_instance.digest)
        if raise_on_error:
            raise CreateManifestException(str(ex))

        return None

    # Load, parse and get/create the child manifests, if any.
    child_manifest_refs = manifest_interface_instance.child_manifests(retriever)
    child_manifest_rows = {}
    child_manifest_label_dicts = []

    if child_manifest_refs is not None:
        for child_manifest_ref in child_manifest_refs:
            # Load and parse the child manifest.
            try:
                child_manifest = child_manifest_ref.manifest_obj
            except (
                ManifestException,
                MalformedSchema2ManifestList,
                BlobDoesNotExist,
                IOError,
            ) as ex:
                logger.exception(
                    "Could not load manifest list for manifest `%s`",
                    manifest_interface_instance.digest,
                )
                if raise_on_error:
                    raise CreateManifestException(str(ex))

                return None

            # Retrieve its labels.
            labels = child_manifest.get_manifest_labels(retriever)
            if labels is None:
                if raise_on_error:
                    raise CreateManifestException("Unable to retrieve manifest labels")

                logger.exception("Could not load manifest labels for child manifest")
                return None

            # Get/create the child manifest in the database.
            child_manifest_info = get_or_create_manifest(
                repository_id, child_manifest, storage, raise_on_error=raise_on_error
            )
            if child_manifest_info is None:
                if raise_on_error:
                    raise CreateManifestException("Unable to retrieve child manifest")

                logger.error("Could not get/create child manifest")
                return None

            child_manifest_rows[child_manifest_info.manifest.digest] = child_manifest_info.manifest
            child_manifest_label_dicts.append(labels)

    # Ensure all the blobs in the manifest exist.
    digests = set(manifest_interface_instance.local_blob_digests)
    blob_map = {}

    # If the special empty layer is required, simply load it directly. This is much faster
    # than trying to load it on a per repository basis, and that is unnecessary anyway since
    # this layer is predefined.
    if EMPTY_LAYER_BLOB_DIGEST in digests:
        digests.remove(EMPTY_LAYER_BLOB_DIGEST)
        blob_map[EMPTY_LAYER_BLOB_DIGEST] = get_shared_blob(EMPTY_LAYER_BLOB_DIGEST)
        if not blob_map[EMPTY_LAYER_BLOB_DIGEST]:
            if raise_on_error:
                raise CreateManifestException("Unable to retrieve specialized empty blob")

            logger.warning("Could not find the special empty blob in storage")
            return None

    if digests:
        query = lookup_repo_storages_by_content_checksum(repository_id, digests)
        blob_map.update({s.content_checksum: s for s in query})
        for digest_str in digests:
            if digest_str not in blob_map:
                logger.warning(
                    "Unknown blob `%s` under manifest `%s` for repository `%s`",
                    digest_str,
                    manifest_interface_instance.digest,
                    repository_id,
                )

                if raise_on_error:
                    raise CreateManifestException("Unknown blob `%s`" % digest_str)

                return None

    # Special check: If the empty layer blob is needed for this manifest, add it to the
    # blob map. This is necessary because Docker decided to elide sending of this special
    # empty layer in schema version 2, but we need to have it referenced for GC and schema version 1.
    if EMPTY_LAYER_BLOB_DIGEST not in blob_map:
        try:
            requires_empty_layer = manifest_interface_instance.get_requires_empty_layer_blob(retriever)
        except ManifestException as ex:
            if raise_on_error:
                raise CreateManifestException(str(ex))

            return None

        if requires_empty_layer is None:
            if raise_on_error:
                raise CreateManifestException("Could not load configuration blob")

            return None

        if requires_empty_layer:
            shared_blob = get_or_create_shared_blob(EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES, storage)
            assert not shared_blob.uploading
            assert shared_blob.content_checksum == EMPTY_LAYER_BLOB_DIGEST
            blob_map[EMPTY_LAYER_BLOB_DIGEST] = shared_blob

    # Determine and populate the legacy image if necessary. Manifest lists will not have a legacy
    # image.
    legacy_image = None
    if manifest_interface_instance.has_legacy_image:
        legacy_image_id = _populate_legacy_image(
            repository_id, manifest_interface_instance, blob_map, retriever, raise_on_error
        )
        if legacy_image_id is None:
            return None

        legacy_image = get_image(repository_id, legacy_image_id)
        if legacy_image is None:
            return None

    # Create the manifest and its blobs.
    media_type = Manifest.media_type.get_id(manifest_interface_instance.media_type)
    storage_ids = {storage.id for storage in blob_map.values()}

    with db_transaction():
        # Check for the manifest. This is necessary because Postgres doesn't handle IntegrityErrors
        # well under transactions.
        try:
            manifest = Manifest.get(
                repository=repository_id, digest=manifest_interface_instance.digest
            )
            return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
        except Manifest.DoesNotExist:
            pass

        # Create the manifest.
        try:
            manifest = Manifest.create(
                repository=repository_id,
                digest=manifest_interface_instance.digest,
                media_type=media_type,
                manifest_bytes=manifest_interface_instance.bytes.as_encoded_str(),
            )
        except IntegrityError as ie:
            try:
                manifest = Manifest.get(
                    repository=repository_id, digest=manifest_interface_instance.digest
                )
            except Manifest.DoesNotExist:
                logger.error("Got integrity error when trying to create manifest: %s", ie)
                if raise_on_error:
                    raise CreateManifestException(
                        "Attempt to create an invalid manifest. Please report this issue."
                    )

                return None

            return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)

        # Insert the blobs.
        blobs_to_insert = [
            dict(manifest=manifest, repository=repository_id, blob=storage_id)
            for storage_id in storage_ids
        ]
        if blobs_to_insert:
            ManifestBlob.insert_many(blobs_to_insert).execute()

        # Set the legacy image (if applicable).
        if legacy_image is not None:
            ManifestLegacyImage.create(
                repository=repository_id, image=legacy_image, manifest=manifest
            )

        # Insert the manifest child rows (if applicable).
        if child_manifest_rows:
            children_to_insert = [
                dict(manifest=manifest, child_manifest=child_manifest, repository=repository_id)
                for child_manifest in child_manifest_rows.values()
            ]
            ManifestChild.insert_many(children_to_insert).execute()

        # If this manifest is being created not for immediate tagging, add a temporary tag to the
        # manifest to ensure it isn't being GCed. If the manifest *is* for tagging, then since we're
        # creating a new one here, it cannot be GCed (since it isn't referenced by anything yet), so
        # its safe to elide the temp tag operation. If we ever change GC code to collect *all* manifests
        # in a repository for GC, then we will have to reevaluate this optimization at that time.
        if not for_tagging:
            create_temporary_tag_if_necessary(manifest, temp_tag_expiration_sec)

    # Define the labels for the manifest (if any).
    # TODO: Once the old data model is gone, turn this into a batch operation and make the label
    # application to the manifest occur under the transaction.
    labels = manifest_interface_instance.get_manifest_labels(retriever)
    if labels:
        for key, value in labels.iteritems():
            # NOTE: There can technically be empty label keys via Dockerfile's. We ignore any
            # such `labels`, as they don't really mean anything.
            if not key:
                continue

            media_type = "application/json" if is_json(value) else "text/plain"
            create_manifest_label(manifest, key, value, "manifest", media_type)

    # Return the dictionary of labels to apply (i.e. those labels that cause an action to be taken
    # on the manifest or its resulting tags). We only return those labels either defined on
    # the manifest or shared amongst all the child manifests. We intersect amongst all child manifests
    # to ensure that any action performed is defined in all manifests.
    labels_to_apply = labels or {}
    if child_manifest_label_dicts:
        labels_to_apply = child_manifest_label_dicts[0].viewitems()
        for child_manifest_label_dict in child_manifest_label_dicts[1:]:
            # Intersect the key+values of the labels to ensure we get the exact same result
            # for all the child manifests.
            labels_to_apply = labels_to_apply & child_manifest_label_dict.viewitems()

        labels_to_apply = dict(labels_to_apply)

    return CreatedManifest(manifest=manifest, newly_created=True, labels_to_apply=labels_to_apply)

def _create_manifest_and_retarget_tag(
    self,
    repository_ref: RepositoryReference,
    manifest: ManifestInterface,
    tag_name: str,
) -> tuple[Manifest | None, Tag | None]:
    """
    Creates a manifest in the given repository.

    Also creates placeholders for the objects referenced by the manifest. For manifest lists,
    creates placeholder sub-manifests. For regular manifests, creates placeholder blobs.

    Placeholder objects will be "filled" with the objects' contents on upcoming client requests,
    as part of the flow described in the OCI distribution specification.

    Returns a reference to the (created manifest, tag) or (None, None) on error.
    """
    with db_disallow_replica_use():
        with db_transaction():
            db_manifest = oci.manifest.lookup_manifest(
                repository_ref.id, manifest.digest, allow_dead=True)
            if db_manifest is None:
                db_manifest = oci.manifest.create_manifest(
                    repository_ref.id, manifest, raise_on_error=True)
                self._recalculate_repository_size(repository_ref)

            if db_manifest is None:
                return None, None

            # 0 means a tag never expires - if we get 0 as expiration,
            # we set the tag expiration to None.
            expiration = self._config.expiration_s or None
            tag = oci.tag.retarget_tag(
                tag_name,
                db_manifest,
                raise_on_error=True,
                expiration_seconds=expiration,
            )
            if tag is None:
                return None, None

            wrapped_manifest = Manifest.for_manifest(db_manifest, self._legacy_image_id_handler)
            wrapped_tag = Tag.for_tag(tag, self._legacy_image_id_handler,
                                      manifest_row=db_manifest)
            if not manifest.is_manifest_list:
                self._create_placeholder_blobs(manifest, db_manifest.id, repository_ref.id)
                return wrapped_manifest, wrapped_tag

            manifests_to_connect = []
            for child in manifest.child_manifests(content_retriever=None):
                m = oci.manifest.lookup_manifest(repository_ref.id, child.digest, allow_dead=True)
                if m is None:
                    m = oci.manifest.create_manifest(repository_ref.id, child)
                    oci.tag.create_temporary_tag_if_necessary(
                        m, self._config.expiration_s or None)
                try:
                    ManifestChild.get(manifest=db_manifest.id, child_manifest=m.id)
                except ManifestChild.DoesNotExist:
                    manifests_to_connect.append(m)

            oci.manifest.connect_manifests(manifests_to_connect, db_manifest, repository_ref.id)

            return wrapped_manifest, wrapped_tag

def _update_manifest_for_tag(
    self,
    repo_ref: RepositoryReference,
    tag: Tag,
    manifest: Manifest,
    manifest_ref: str,
    create_manifest_fn,
) -> tuple[Tag, bool]:
    """
    Updates a placeholder manifest with the given tag name.

    If the manifest is stale, downloads it from the upstream registry and creates a new manifest
    and retargets the tag.

    A manifest is considered stale when the manifest's digest changed in the upstream registry.
    A manifest is considered a placeholder when its db entry exists, but its manifest_bytes field
    is empty.

    Raises UpstreamRegistryError if the upstream registry returns anything other than 200.
    Raises ManifestDoesNotExist if the given manifest was not found in the database.

    Returns a new tag if one was created, or the existing one with a manifest freshly out of the
    database, and a boolean indicating whether the returned tag was newly created or not.
    """
    upstream_manifest = None
    upstream_digest = self._proxy.manifest_exists(manifest_ref, ACCEPTED_MEDIA_TYPES)
    up_to_date = manifest.digest == upstream_digest

    # manifest_exists will return an empty/None digest when the upstream
    # registry omits the docker-content-digest header.
    if not upstream_digest:
        upstream_manifest = self._pull_upstream_manifest(repo_ref.name, manifest_ref)
        up_to_date = manifest.digest == upstream_manifest.digest

    placeholder = manifest.internal_manifest_bytes.as_unicode() == ""
    if up_to_date and not placeholder:
        return tag, False

    if upstream_manifest is None:
        upstream_manifest = self._pull_upstream_manifest(repo_ref.name, manifest_ref)

    self._enforce_repository_quota(repo_ref)
    if up_to_date and placeholder:
        with db_disallow_replica_use():
            with db_transaction():
                q = ManifestTable.update(
                    manifest_bytes=upstream_manifest.bytes.as_unicode(),
                    layers_compressed_size=upstream_manifest.layers_compressed_size,
                ).where(ManifestTable.id == manifest.id)
                q.execute()

                self._create_placeholder_blobs(upstream_manifest, manifest.id, repo_ref.id)
                db_tag = oci.tag.get_tag_by_manifest_id(repo_ref.id, manifest.id)
                self._recalculate_repository_size(repo_ref)
                return Tag.for_tag(db_tag, self._legacy_image_id_handler), False

    # if we got here, the manifest is stale, so we both create a new manifest
    # entry in the db, and retarget the tag.
    _, tag = create_manifest_fn(repo_ref, upstream_manifest, manifest_ref)
    return tag, True

def perform_indexing(self, start_token=None):
    whitelisted_namespaces = self.app.config.get("SECURITY_SCANNER_V4_NAMESPACE_WHITELIST", [])
    try:
        indexer_state = self._secscan_api.state()
    except APIRequestFailure:
        return None

    def eligible_manifests(base_query):
        return (base_query
                .join(Repository)
                .join(User)
                .where(User.username << whitelisted_namespaces))

    min_id = (start_token.min_id
              if start_token is not None
              else Manifest.select(fn.Min(Manifest.id)).scalar())
    max_id = Manifest.select(fn.Max(Manifest.id)).scalar()

    if max_id is None or min_id is None or min_id > max_id:
        return None

    reindex_threshold = lambda: datetime.utcnow() - timedelta(
        seconds=self.app.config.get("SECURITY_SCANNER_V4_REINDEX_THRESHOLD"))

    # TODO(alecmerdler): Filter out any `Manifests` that are still being uploaded
    def not_indexed_query():
        return (eligible_manifests(Manifest.select())
                .switch(Manifest)
                .join(ManifestSecurityStatus, JOIN.LEFT_OUTER)
                .where(ManifestSecurityStatus.id >> None))

    def index_error_query():
        return (eligible_manifests(Manifest.select())
                .switch(Manifest)
                .join(ManifestSecurityStatus)
                .where(
                    ManifestSecurityStatus.index_status == IndexStatus.FAILED,
                    ManifestSecurityStatus.last_indexed < reindex_threshold(),
                ))

    def needs_reindexing_query(indexer_hash):
        return (eligible_manifests(Manifest.select())
                .switch(Manifest)
                .join(ManifestSecurityStatus)
                .where(
                    ManifestSecurityStatus.indexer_hash != indexer_hash,
                    ManifestSecurityStatus.last_indexed < reindex_threshold(),
                ))

    # 4^log10(total) gives us a scalable batch size into the billions.
    batch_size = int(4 ** log10(max(10, max_id - min_id)))

    iterator = itertools.chain(
        yield_random_entries(
            not_indexed_query,
            Manifest.id,
            batch_size,
            max_id,
            min_id,
        ),
        yield_random_entries(
            index_error_query,
            Manifest.id,
            batch_size,
            max_id,
            min_id,
        ),
        yield_random_entries(
            lambda: needs_reindexing_query(indexer_state.get("state", "")),
            Manifest.id,
            batch_size,
            max_id,
            min_id,
        ),
    )

    for candidate, abt, num_remaining in iterator:
        manifest = ManifestDataType.for_manifest(candidate, None)
        layers = registry_model.list_manifest_layers(manifest, self.storage, True)

        logger.debug("Indexing %s/%s@%s" % (candidate.repository.namespace_user,
                                            candidate.repository.name, manifest.digest))

        try:
            (report, state) = self._secscan_api.index(manifest, layers)
        except APIRequestFailure:
            logger.exception("Failed to perform indexing, security scanner API error")
            return None

        with db_transaction():
            ManifestSecurityStatus.delete().where(
                ManifestSecurityStatus.manifest == candidate).execute()
            ManifestSecurityStatus.create(
                manifest=candidate,
                repository=candidate.repository,
                error_json=report["err"],
                index_status=(IndexStatus.FAILED
                              if report["state"] == IndexReportState.Index_Error
                              else IndexStatus.COMPLETED),
                indexer_hash=state,
                indexer_version=IndexerVersion.V4,
                metadata_json={},
            )

    return ScanToken(max_id + 1)

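# Worked example (illustrative): the 4 ** log10(...) batch size grows slowly with the id range,
# which is what keeps the random-sampling iterator usable from small installs to very large ones.
from math import log10

for id_range in (10, 1_000, 1_000_000, 1_000_000_000):
    batch_size = int(4 ** log10(max(10, id_range)))
    print(id_range, batch_size)  # roughly: 10 -> 4, 1_000 -> 64, 1_000_000 -> 4096, 1_000_000_000 -> 262144
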