def create_repository(
    namespace, name, creating_user, visibility="private", repo_kind="image", description=None
):
    namespace_user = User.get(username=namespace)
    yesterday = datetime.now() - timedelta(days=1)

    with db_transaction():
        # Check if the repository exists to avoid an IntegrityError if possible.
        existing = get_repository(namespace, name)
        if existing is not None:
            return None

        try:
            repo = Repository.create(
                name=name,
                visibility=Repository.visibility.get_id(visibility),
                namespace_user=namespace_user,
                kind=Repository.kind.get_id(repo_kind),
                description=description,
            )
        except IntegrityError as ie:
            # NOTE: This is a just-in-case fallback.
            try:
                Repository.get(namespace_user=namespace_user, name=name)
                return None
            except Repository.DoesNotExist:
                logger.error(
                    "Got integrity error when trying to create repository %s/%s: %s",
                    namespace,
                    name,
                    ie,
                )
                return None

        RepositoryActionCount.create(repository=repo, count=0, date=yesterday)
        RepositorySearchScore.create(repository=repo, score=0)

        # Note: We put the admin create permission under the transaction to ensure it is created.
        if creating_user and not creating_user.organization:
            admin = Role.get(name="admin")
            RepositoryPermission.create(user=creating_user, repository=repo, role=admin)

        # Apply default permissions (only occurs for repositories under organizations).
        if creating_user and not creating_user.organization and creating_user.username != namespace:
            permission.apply_default_permissions(repo, creating_user)

        return repo
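# Hedged usage sketch: how a caller might invoke create_repository and react to the
# None return, which signals a name collision (or a lost race against another creator).
# RepositoryNameConflict is a hypothetical exception defined only for this sketch; it
# is not part of this module.
class RepositoryNameConflict(Exception):
    pass


def create_repository_or_conflict(namespace, name, creating_user):
    repo = create_repository(namespace, name, creating_user, visibility="private")
    if repo is None:
        raise RepositoryNameConflict("%s/%s already exists" % (namespace, name))
    return repo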
def _garbage_collection_repos(self, skip_lock_for_testing=False):
    """
    Performs garbage collection on repositories.
    """
    with UseThenDisconnect(app.config):
        policy = get_random_gc_policy()
        if policy is None:
            logger.debug("No GC policies found")
            return

        repo_ref = registry_model.find_repository_with_garbage(policy)
        if repo_ref is None:
            logger.debug("No repository with garbage found")
            return

        assert features.GARBAGE_COLLECTION

        try:
            with GlobalLock(
                "REPO_GARBAGE_COLLECTION_%s" % repo_ref.id,
                lock_ttl=REPOSITORY_GC_TIMEOUT + LOCK_TIMEOUT_PADDING,
            ) if not skip_lock_for_testing else empty_context():
                try:
                    repository = Repository.get(id=repo_ref.id)
                except Repository.DoesNotExist:
                    return

                logger.debug(
                    "Starting GC of repository #%s (%s)", repository.id, repository.name
                )
                garbage_collect_repo(repository)
                logger.debug(
                    "Finished GC of repository #%s (%s)", repository.id, repository.name
                )
        except LockNotAcquiredException:
            logger.debug("Could not acquire repo lock for garbage collection")
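# A minimal sketch of the no-op context manager assumed above when
# skip_lock_for_testing is set; the real helper may live elsewhere in the codebase.
from contextlib import contextmanager


@contextmanager
def empty_context():
    yield None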
def find_repository_with_garbage(limit_to_gc_policy_s):
    """
    Returns a repository that has garbage (defined as an expired RepositoryTag that is past the
    repo's namespace's expiration window) or None if none.
    """
    expiration_timestamp = get_epoch_timestamp() - limit_to_gc_policy_s

    try:
        # Collect a bounded set of candidate repositories with at least one expired tag.
        # Note: `~(lifetime_end_ts >> None)` is peewee for `lifetime_end_ts IS NOT NULL`.
        candidates = (
            RepositoryTag.select(RepositoryTag.repository)
            .join(Repository)
            .join(Namespace, on=(Repository.namespace_user == Namespace.id))
            .where(
                ~(RepositoryTag.lifetime_end_ts >> None),
                (RepositoryTag.lifetime_end_ts <= expiration_timestamp),
                (Namespace.removed_tag_expiration_s == limit_to_gc_policy_s),
            )
            .limit(500)
            .distinct()
            .alias("candidates")
        )

        # Pick one candidate at random so concurrent GC workers spread across repositories.
        found = (
            RepositoryTag.select(candidates.c.repository_id)
            .from_(candidates)
            .order_by(db_random_func())
            .get()
        )

        if found is None:
            return

        return Repository.get(Repository.id == found.repository_id)
    except RepositoryTag.DoesNotExist:
        return None
    except Repository.DoesNotExist:
        return None
def find_repository_with_garbage(limit_to_gc_policy_s):
    """
    Returns a repository that has garbage (defined as an expired Tag that is past the repo's
    namespace's expiration window) or None if none.
    """
    expiration_timestamp = get_epoch_timestamp_ms() - (limit_to_gc_policy_s * 1000)

    try:
        candidates = (
            Tag.select(Tag.repository)
            .join(Repository)
            .join(Namespace, on=(Repository.namespace_user == Namespace.id))
            .where(
                ~(Tag.lifetime_end_ms >> None),
                (Tag.lifetime_end_ms <= expiration_timestamp),
                (Namespace.removed_tag_expiration_s == limit_to_gc_policy_s),
                (Namespace.enabled == True),
                (Repository.state != RepositoryState.MARKED_FOR_DELETION),
            )
            .limit(GC_CANDIDATE_COUNT)
            .distinct()
            .alias("candidates")
        )

        found = (
            Tag.select(candidates.c.repository_id)
            .from_(candidates)
            .order_by(db_random_func())
            .get()
        )

        if found is None:
            return

        return Repository.get(Repository.id == found.repository_id)
    except Tag.DoesNotExist:
        return None
    except Repository.DoesNotExist:
        return None
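# Worked example of the expiration cutoff used above (values are illustrative only):
# with a namespace policy of two weeks, a tag becomes GC-eligible once its
# lifetime_end_ms is at least two weeks in the past.
TWO_WEEKS_S = 60 * 60 * 24 * 14


def example_cutoff_ms(now_ms):
    # Mirrors: get_epoch_timestamp_ms() - (limit_to_gc_policy_s * 1000)
    return now_ms - (TWO_WEEKS_S * 1000)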
def create_temporary_hidden_tag(repo, image_obj, expiration_s):
    """
    Create a tag with a defined timeline, that will not appear in the UI or CLI.

    Returns the name of the temporary tag or None on error.
    """
    now_ts = get_epoch_timestamp()
    expire_ts = now_ts + expiration_s
    tag_name = str(uuid4())

    # Ensure the repository is not marked for deletion.
    with db_transaction():
        current = Repository.get(id=repo)
        if current.state == RepositoryState.MARKED_FOR_DELETION:
            return None

        RepositoryTag.create(
            repository=repo,
            image=image_obj,
            name=tag_name,
            lifetime_start_ts=now_ts,
            lifetime_end_ts=expire_ts,
            hidden=True,
        )
        return tag_name
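# Hedged usage sketch: a caller (for example, push code assembling a manifest) might pin
# an image with a short-lived hidden tag so GC cannot reclaim it mid-operation.
# TEMP_TAG_EXPIRATION_S and pin_image_for_upload are illustrative names, not defined in
# this module.
TEMP_TAG_EXPIRATION_S = 60 * 60  # one hour


def pin_image_for_upload(repo, image_obj):
    temp_tag = create_temporary_hidden_tag(repo, image_obj, TEMP_TAG_EXPIRATION_S)
    if temp_tag is None:
        raise RuntimeError("Repository is marked for deletion; cannot pin image")
    return temp_tag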
def purge_repository(repo, force=False):
    """
    Completely delete all traces of the repository.

    Will return True upon complete success, and False upon partial or total failure. Garbage
    collection is incremental and repeatable, so this return value does not need to be checked or
    responded to.
    """
    assert repo.state == RepositoryState.MARKED_FOR_DELETION or force

    # Delete the repository of all Appr-referenced entries.
    # Note that new-model Tag's must be deleted in *two* passes, as they can reference parent tags,
    # and MySQL is... particular... about such relationships when deleting.
    if repo.kind.name == "application":
        ApprTag.delete().where(
            ApprTag.repository == repo, ~(ApprTag.linked_tag >> None)
        ).execute()
        ApprTag.delete().where(ApprTag.repository == repo).execute()
    else:
        # GC to remove the images and storage.
        _purge_repository_contents(repo)

    # Ensure there are no additional tags, manifests, images or blobs in the repository.
    assert ApprTag.select().where(ApprTag.repository == repo).count() == 0
    assert Tag.select().where(Tag.repository == repo).count() == 0
    assert RepositoryTag.select().where(RepositoryTag.repository == repo).count() == 0
    assert Manifest.select().where(Manifest.repository == repo).count() == 0
    assert ManifestBlob.select().where(ManifestBlob.repository == repo).count() == 0
    assert Image.select().where(Image.repository == repo).count() == 0

    # Delete any repository build triggers, builds, and any other large-ish reference tables for
    # the repository.
    _chunk_delete_all(repo, RepositoryPermission, force=force)
    _chunk_delete_all(repo, RepositoryBuild, force=force)
    _chunk_delete_all(repo, RepositoryBuildTrigger, force=force)
    _chunk_delete_all(repo, RepositoryActionCount, force=force)
    _chunk_delete_all(repo, Star, force=force)
    _chunk_delete_all(repo, AccessToken, force=force)
    _chunk_delete_all(repo, RepositoryNotification, force=force)
    _chunk_delete_all(repo, BlobUpload, force=force)
    _chunk_delete_all(repo, RepoMirrorConfig, force=force)
    _chunk_delete_all(repo, RepositoryAuthorizedEmail, force=force)

    # Delete any marker rows for the repository.
    DeletedRepository.delete().where(DeletedRepository.repository == repo).execute()

    # Delete the rest of the repository metadata.
    try:
        # Make sure the repository still exists.
        fetched = Repository.get(id=repo.id)
    except Repository.DoesNotExist:
        return False

    fetched.delete_instance(recursive=True, delete_nullable=False, force=force)
    return True
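# Hedged sketch of a caller: walk the DeletedRepository marker rows and purge each
# repository previously marked for deletion. The real worker wiring (batching, retention
# windows, error handling) is assumed to differ; this only illustrates the force=False
# contract enforced by the assert above.
def purge_marked_repositories():
    # Materialize the markers first, since purge_repository deletes the marker row itself.
    for marker in list(DeletedRepository.select()):
        purge_repository(marker.repository, force=False)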
def _temp_link_blob(repository_id, storage, link_expiration_s):
    """
    Note: Should *always* be called by a parent under a transaction.
    """
    try:
        repository = Repository.get(id=repository_id)
    except Repository.DoesNotExist:
        return None

    if repository.state == RepositoryState.MARKED_FOR_DELETION:
        return None

    return UploadedBlob.create(
        repository=repository_id,
        blob=storage,
        expires_at=datetime.utcnow() + timedelta(seconds=link_expiration_s),
    )
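# Hedged usage sketch: per the note above, the caller owns the transaction. The
# storage_row argument and the 20-minute expiration are illustrative values only.
def _example_temp_link(repository_id, storage_row):
    with db_transaction():
        uploaded = _temp_link_blob(repository_id, storage_row, link_expiration_s=60 * 20)
        if uploaded is None:
            # Repository is missing or marked for deletion; treat as a failed link.
            return None
        return uploaded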
def lookup_secscan_notification_severities(repository_id):
    """
    Yields the configured security scanner notification severities for the repository.

    Yields nothing if the repository does not exist or if no matching notifications are
    configured.
    """
    try:
        repo = Repository.get(id=repository_id)
    except Repository.DoesNotExist:
        return None

    event_kind = ExternalNotificationEvent.get(name="vulnerability_found")
    for event in RepositoryNotification.select().where(
        RepositoryNotification.repository == repository_id,
        RepositoryNotification.event == event_kind,
    ):
        severity = json.loads(event.event_config_json).get("vulnerability", {}).get("priority")
        if severity:
            yield severity
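# Hedged usage sketch: because the function above is a generator, the DoesNotExist branch
# simply ends iteration, so callers see an empty sequence rather than None when the
# repository is missing. The "Critical" severity string is illustrative only.
def repository_wants_critical_notifications(repository_id):
    severities = set(lookup_secscan_notification_severities(repository_id))
    return "Critical" in severities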
def lookup_repository(repo_id):
    try:
        return Repository.get(Repository.id == repo_id)
    except Repository.DoesNotExist:
        return None
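# Hedged usage sketch: callers branch on the None return rather than catching
# Repository.DoesNotExist themselves.
def repository_exists(repo_id):
    return lookup_repository(repo_id) is not None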