def validate_reset_code(token):
    """Validate a password-reset token and return its user, or None.

    Decodes the public/private token, looks up the matching pw-reset
    EmailConfirmation row, verifies the private half, enforces the configured
    lifetime, then marks the user verified and consumes the code.
    """
    decoded = decode_public_private_token(token)
    if not decoded:
        return None

    # Look up the password-reset confirmation row for the public code.
    try:
        confirmation = EmailConfirmation.get(
            EmailConfirmation.code == decoded.public_code,
            EmailConfirmation.pw_reset == True)
    except EmailConfirmation.DoesNotExist:
        return None

    # Reject if the private half of the token does not match.
    if decoded.private_token and not confirmation.verification_code.matches(
            decoded.private_token):
        return None

    # Expired codes are removed and rejected.
    lifetime = convert_to_timedelta(
        config.app_config["USER_RECOVERY_TOKEN_LIFETIME"])
    if confirmation.created + lifetime < datetime.now():
        confirmation.delete_instance()
        return None

    # Mark the user verified and consume the code atomically.
    owner = confirmation.user
    with db_transaction():
        if not owner.verified:
            owner.verified = True
            owner.save()
        confirmation.delete_instance()
    return owner
def create_temporary_hidden_tag(repo, image_obj, expiration_s):
    """
    Create a tag with a defined timeline, that will not appear in the UI or CLI.

    Returns the name of the temporary tag or None on error (repository marked
    for deletion).
    """
    start_ts = get_epoch_timestamp()
    end_ts = start_ts + expiration_s
    generated_name = str(uuid4())

    with db_transaction():
        # Ensure the repository is not marked for deletion.
        repo_row = Repository.get(id=repo)
        if repo_row.state == RepositoryState.MARKED_FOR_DELETION:
            return None

        RepositoryTag.create(
            repository=repo,
            image=image_obj,
            name=generated_name,
            lifetime_start_ts=start_ts,
            lifetime_end_ts=end_ts,
            hidden=True,
        )
        return generated_name
def change_username(user_id, new_username):
    """Rename a user/namespace and every robot account it owns.

    Raises InvalidUsernameException when the new name fails validation.
    Returns the updated user row.
    """
    is_valid, validation_issue = validate_username(new_username)
    if not is_valid:
        raise InvalidUsernameException("Invalid username %s: %s" %
                                       (new_username, validation_issue))

    with db_transaction():
        # Reload the user row under a row lock before mutating it.
        user = db_for_update(User.select().where(User.id == user_id)).get()

        # Rename every robot owned by the namespace to use the new prefix.
        robot_rows = db_for_update(
            _list_entity_robots(user.username, include_metadata=False,
                                include_token=False))
        for robot in robot_rows:
            _, shortname = parse_robot_username(robot.username)
            robot.username = format_robot_username(new_username, shortname)
            robot.save()

        # Rename the user itself.
        user.username = new_username
        user.save()

        # Picking a name explicitly resolves any pending username prompt.
        remove_user_prompt(user, "confirm_username")
        return user
def _purge_oci_tag(tag, context, allow_non_expired=False):
    """Delete an OCI tag row and queue its manifest for garbage collection.

    Unless allow_non_expired is set, the tag must already be expired.
    Returns True when the tag row was deleted, False when the tag no longer
    exists or its expiry changed underneath us (another writer won the race).
    """
    assert tag.repository_id == context.repository.id

    if not allow_non_expired:
        assert tag.lifetime_end_ms is not None
        assert tag.lifetime_end_ms <= oci_tag.get_epoch_timestamp_ms()

    # Add the manifest to be GCed.
    context.add_manifest_id(tag.manifest_id)

    with db_transaction():
        # Reload the tag under lock and verify its lifetime_end_ms has not
        # changed; if it did, another writer resurrected or retargeted it.
        try:
            reloaded_tag = db_for_update(Tag.select().where(Tag.id == tag.id)).get()
        except Tag.DoesNotExist:
            return False

        assert reloaded_tag.id == tag.id
        assert reloaded_tag.repository_id == context.repository.id

        if reloaded_tag.lifetime_end_ms != tag.lifetime_end_ms:
            return False

        # Delete mapping rows.
        TagToRepositoryTag.delete().where(TagToRepositoryTag.tag == tag).execute()

        # Delete the tag.
        tag.delete_instance()

    # Fix: report success explicitly instead of implicitly returning None,
    # matching the explicit False returns on the failure paths above.
    return True
def _check_image_used(legacy_image_id):
    """Return True if the legacy Image row is still referenced anywhere."""
    assert legacy_image_id is not None

    def _any_row(query, missing_exc):
        # .get() raises the model's DoesNotExist when no row matches.
        try:
            query.get()
            return True
        except missing_exc:
            return False

    with db_transaction():
        # A manifest mapping keeps the image alive.
        if _any_row(
                ManifestLegacyImage.select().where(
                    ManifestLegacyImage.image == legacy_image_id),
                ManifestLegacyImage.DoesNotExist):
            return True

        # So does a (legacy) repository tag pointing at it.
        if _any_row(
                RepositoryTag.select().where(
                    RepositoryTag.image == legacy_image_id),
                RepositoryTag.DoesNotExist):
            return True

        # And so does any child image listing it as parent.
        if _any_row(
                Image.select().where(Image.parent == legacy_image_id),
                Image.DoesNotExist):
            return True

    return False
def create_organization(name, email, creating_user, email_required=True,
                        is_possible_abuser=False):
    """Create a new organization owned (via the 'owners' team) by creating_user.

    Raises InvalidOrganizationException when the name fails username
    validation. Returns the new organization user row.
    """
    with db_transaction():
        try:
            # Create the org as an unverified user, then flip the org flag.
            new_org = user.create_user_noverify(
                name, email, email_required=email_required,
                is_possible_abuser=is_possible_abuser)
            new_org.organization = True
            new_org.save()

            # Create a team for the owners.
            owners_team = team.create_team('owners', new_org, 'admin')

            # Add the user who created the org to the owners team.
            team.add_user_to_team(creating_user, owners_team)

            return new_org
        except InvalidUsernameException as iue:
            # Fix: BaseException.message does not exist on Python 3;
            # str(iue) yields the message on both Python 2 and 3.
            raise InvalidOrganizationException(str(iue))
def remove_organization_member(org, user_obj):
    """Remove a user from all repositories and teams of an organization.

    Raises DataModelException if the user is the organization's only admin.
    """
    admin_usernames = [admin.username for admin in __get_org_admin_users(org)]
    if len(admin_usernames) == 1 and user_obj.username in admin_usernames:
        raise DataModelException(
            'Cannot remove user as they are the only organization admin')

    with db_transaction():
        # Find and remove the user from any repositories under the org.
        permission_ids = list(
            RepositoryPermission.select(RepositoryPermission.id)
            .join(Repository)
            .where(Repository.namespace_user == org,
                   RepositoryPermission.user == user_obj))
        if permission_ids:
            RepositoryPermission.delete().where(
                RepositoryPermission.id << permission_ids).execute()

        # Find and remove the user from any teams under the org.
        membership_ids = list(
            TeamMember.select(TeamMember.id)
            .join(Team)
            .where(Team.organization == org, TeamMember.user == user_obj))
        if membership_ids:
            TeamMember.delete().where(
                TeamMember.id << membership_ids).execute()
def convert_user_to_organization(user_obj, admin_user):
    """Turn a regular user account into an organization run by admin_user.

    Raises DataModelException for robot accounts. Returns the converted row.
    """
    if user_obj.robot:
        raise DataModelException('Cannot convert a robot into an organization')

    with db_transaction():
        # Flip the account into an organization and drop its password so the
        # account itself can no longer log in.
        user_obj.organization = True
        user_obj.password_hash = None
        user_obj.save()

        # Clear any federated auth pointing to this user.
        FederatedLogin.delete().where(
            FederatedLogin.user == user_obj).execute()

        # Delete any user-specific permissions on repositories.
        (RepositoryPermission.delete()
         .where(RepositoryPermission.user == user_obj)
         .execute())

        # Create the owners team and seed it with the administering user.
        owners = team.create_team('owners', user_obj, 'admin')
        team.add_user_to_team(admin_user, owners)

        return user_obj
def create_repository(namespace, name, creating_user, visibility='private',
                      repo_kind='image', description=None):
    """Create a repository under `namespace` with the given visibility/kind.

    Seeds its action-count and search-score rows, grants the creating user
    admin permission (for non-org creators), and applies default permissions
    for repositories created under organizations. Returns the new repository.
    """
    namespace_user = User.get(username=namespace)
    count_start_date = datetime.now() - timedelta(days=1)

    with db_transaction():
        repo = Repository.create(
            name=name,
            visibility=Repository.visibility.get_id(visibility),
            namespace_user=namespace_user,
            kind=Repository.kind.get_id(repo_kind),
            description=description)

        # Seed the bookkeeping rows for the new repository.
        RepositoryActionCount.create(repository=repo, count=0,
                                     date=count_start_date)
        RepositorySearchScore.create(repository=repo, score=0)

        # Note: We put the admin create permission under the transaction to
        # ensure it is created.
        if creating_user and not creating_user.organization:
            admin_role = Role.get(name='admin')
            RepositoryPermission.create(user=creating_user, repository=repo,
                                        role=admin_role)

    # Apply default permissions (only occurs for repositories under organizations)
    if creating_user and not creating_user.organization and creating_user.username != namespace:
        permission.apply_default_permissions(repo, creating_user)

    return repo
def regenerate_robot_token(robot_shortname, parent):
    """Rotate a robot account's credentials.

    Returns (robot, new_password, metadata).
    """
    robot_username = format_robot_username(parent.username, robot_shortname)
    robot, metadata = lookup_robot_and_metadata(robot_username)

    new_password = random_string_generator(length=64)()

    # Rotate the unique identity fields so old credentials cannot be reused.
    robot.email = str(uuid4())
    robot.uuid = str(uuid4())

    quayrobot_service = LoginService.get(name="quayrobot")
    federated = FederatedLogin.get(FederatedLogin.user == robot,
                                   FederatedLogin.service == quayrobot_service)
    federated.service_ident = "robot:%s" % (robot.id)

    # Reuse the token row when present, otherwise create one.
    try:
        token_data = RobotAccountToken.get(robot_account=robot)
    except RobotAccountToken.DoesNotExist:
        token_data = RobotAccountToken.create(robot_account=robot)

    token_data.token = new_password

    with db_transaction():
        token_data.save()
        federated.save()
        robot.save()

    return robot, new_password, metadata
def mark_namespace_for_deletion(user, queues, namespace_gc_queue, force=False):
    """
    Marks a namespace (as referenced by the given user) for deletion. A queue item will be added
    to delete the namespace's repositories and storage, while the namespace itself will be
    renamed, disabled, and delinked from other tables.

    Returns the id of the created DeletedNamespace marker, or None when the
    namespace is already disabled, or nothing (implicit None) when a marker
    already exists for this namespace.
    """
    if not user.enabled:
        return None

    if not force and not user.organization:
        # Ensure that the user is not the sole admin for any organizations. If so, then the user
        # cannot be deleted before those organizations are deleted or reassigned.
        organizations = get_solely_admined_organizations(user)
        if len(organizations) > 0:
            message = (
                "Cannot delete %s as you are the only admin for organizations: "
                % user.username)
            for index, org in enumerate(organizations):
                if index > 0:
                    message = message + ", "
                message = message + org.username
            raise DataModelException(message)

    # Delete all queue items for the user.
    for queue in queues:
        queue.delete_namespaced_items(user.username)

    # Delete non-repository related items. This operation is very quick, so we can do so here.
    _delete_user_linked_data(user)

    with db_transaction():
        # Capture the username before rebinding `user` to the locked row below.
        original_username = user.username
        user = db_for_update(User.select().where(User.id == user.id)).get()

        # Mark the namespace as deleted and ready for GC.
        # IntegrityError here means a DeletedNamespace marker already exists,
        # i.e. the namespace was already marked; treat that as a no-op.
        try:
            marker = DeletedNamespace.create(
                namespace=user, original_username=original_username,
                original_email=user.email)
        except IntegrityError:
            return

        # Disable the namespace itself, and replace its various unique fields
        # with UUIDs, freeing the username/email for reuse.
        user.enabled = False
        user.username = str(uuid4())
        user.email = str(uuid4())
        user.save()

        # Add a queueitem to delete the namespace.
        marker.queue_id = namespace_gc_queue.put(
            [str(user.id)],
            json.dumps({
                "marker_id": marker.id,
                "original_username": original_username,
            }),
        )
        marker.save()

    return marker.id
def confirm_user_email(token):
    """Confirm an email-confirmation token, marking the user verified.

    When the confirmation carries a new email address, the user's email is
    changed to it. Returns (user, new_email, old_email); new_email/old_email
    are None when no address change occurred.

    Raises DataModelException for invalid tokens or an already-used address.
    """
    result = decode_public_private_token(token)
    if not result:
        raise DataModelException("Invalid email confirmation code")

    try:
        code = EmailConfirmation.get(
            EmailConfirmation.code == result.public_code,
            EmailConfirmation.email_confirm == True)
    except EmailConfirmation.DoesNotExist:
        raise DataModelException("Invalid email confirmation code")

    if result.private_token and not code.verification_code.matches(
            result.private_token):
        raise DataModelException("Invalid email confirmation code")

    user = code.user
    user.verified = True

    old_email = None
    new_email = code.new_email

    # Fix: compare against the user's *current* email rather than the
    # just-initialized None (a dead check); otherwise confirming a change to
    # the address the user already has would trip the "already used" check
    # against the user themselves.
    if new_email and new_email != user.email:
        if find_user_by_email(new_email):
            raise DataModelException("E-mail address already used")

        old_email = user.email
        user.email = new_email

    with db_transaction():
        user.save()
        code.delete_instance()

    return user, new_email, old_email
def _garbage_collect_legacy_image(legacy_image_id, context): assert legacy_image_id is not None # Check if the image is referenced. if _check_image_used(legacy_image_id): return False # We have an unreferenced image. We can now delete it. # Grab any derived storage for the image. for derived in DerivedStorageForImage.select().where( DerivedStorageForImage.source_image == legacy_image_id): context.add_blob_id(derived.derivative_id) try: image = Image.select().where(Image.id == legacy_image_id).get() except Image.DoesNotExist: return False assert image.repository_id == context.repository.id # Add the image's blob to be GCed. context.add_blob_id(image.storage_id) # If the image has a parent ID, add the parent for GC. if image.parent_id is not None: context.add_legacy_image_id(image.parent_id) # Delete the image. with db_transaction(): if _check_image_used(legacy_image_id): return False try: image = Image.select().where(Image.id == legacy_image_id).get() except Image.DoesNotExist: return False assert image.id == legacy_image_id assert image.repository_id == context.repository.id # Delete any derived storage for the image. deleted_derived_storage = (DerivedStorageForImage.delete().where( DerivedStorageForImage.source_image == legacy_image_id).execute()) # Delete the image itself. image.delete_instance() context.mark_legacy_image_removed(image) gc_table_rows_deleted.labels(table="Image").inc() gc_table_rows_deleted.labels( table="DerivedStorageForImage").inc(deleted_derived_storage) if config.image_cleanup_callbacks: for callback in config.image_cleanup_callbacks: callback([image]) return True
def _associate_manifest(tag, oci_manifest):
    """Create the legacy TagManifest row for `tag` and map it to the given
    OCI manifest. Returns the new TagManifest row."""
    with db_transaction():
        legacy_manifest = TagManifest.create(
            tag=tag,
            digest=oci_manifest.digest,
            json_data=oci_manifest.manifest_bytes)
        TagManifestToManifest.create(tag_manifest=legacy_manifest,
                                     manifest=oci_manifest)
        return legacy_manifest
def find_create_or_link_image(
    docker_image_id, repo_obj, username, translations, preferred_location
):
    """Return the image with `docker_image_id` in `repo_obj`.

    Tries, in order: the image already present in the repository; linking to
    an existing image's storage from any repository visible to `username`;
    finally creating a brand-new image with fresh storage.
    """
    # First check for the image existing in the repository. If found, we simply return it.
    found = get_repo_image(repo_obj.namespace_user.username, repo_obj.name, docker_image_id)
    if found:
        return found

    # Look for an existing image, in any repository visible to the user,
    # whose storage the new image can link to.
    candidates = (
        Image.select(Image, ImageStorage)
        .distinct()
        .join(ImageStorage)
        .switch(Image)
        .join(Repository)
        .join(RepositoryPermission, JOIN.LEFT_OUTER)
        .switch(Repository)
        .join(Namespace, on=(Repository.namespace_user == Namespace.id))
        .where(Image.docker_image_id == docker_image_id)
    )
    candidates = _basequery.filter_to_repos_for_user(
        candidates, _namespace_id_for_username(username)
    )

    # If there is an existing image, we try to translate its ancestry and copy its storage.
    try:
        logger.debug("Looking up existing image for ID: %s", docker_image_id)
        source_image = candidates.get()

        logger.debug("Existing image %s found for ID: %s", source_image.id, docker_image_id)
        linked = _find_or_link_image(
            source_image, repo_obj, username, translations, preferred_location
        )
        if linked:
            return linked
    except Image.DoesNotExist:
        logger.debug("No existing image found for ID: %s", docker_image_id)

    # Otherwise, create a new storage directly.
    with db_transaction():
        # Final check for an existing image, under the transaction.
        found = get_repo_image(
            repo_obj.namespace_user.username, repo_obj.name, docker_image_id
        )
        if found:
            return found

        logger.debug("Creating new storage for docker id: %s", docker_image_id)
        new_storage = storage.create_v1_storage(preferred_location)

        return Image.create(
            docker_image_id=docker_image_id,
            repository=repo_obj,
            storage=new_storage,
            ancestors="/",
        )
def create_repository(namespace, name, creating_user, visibility="private",
                      repo_kind="image", description=None):
    """Create a repository, tolerating a concurrent creation race.

    Returns the new Repository; on a race, returns the already-existing
    repository when it can be loaded, None otherwise. Also returns None when
    the repository already exists at the time of the pre-check.
    """
    namespace_user = User.get(username=namespace)
    count_start_date = datetime.now() - timedelta(days=1)

    try:
        with db_transaction():
            # Check if the repository exists to avoid an IntegrityError if possible.
            if get_repository(namespace, name) is not None:
                return None

            try:
                repo = Repository.create(
                    name=name,
                    visibility=Repository.visibility.get_id(visibility),
                    namespace_user=namespace_user,
                    kind=Repository.kind.get_id(repo_kind),
                    description=description,
                )
            except IntegrityError as ie:
                # Lost a race with another creator; unwind the transaction
                # and resolve the conflict outside of it.
                raise _RepositoryExistsException(ie)

            # Seed the bookkeeping rows for the new repository.
            RepositoryActionCount.create(repository=repo, count=0,
                                        date=count_start_date)
            RepositorySearchScore.create(repository=repo, score=0)

            # Note: We put the admin create permission under the transaction to ensure it is created.
            if creating_user and not creating_user.organization:
                admin_role = Role.get(name="admin")
                RepositoryPermission.create(user=creating_user, repository=repo,
                                            role=admin_role)
    except _RepositoryExistsException as ree:
        try:
            return Repository.get(namespace_user=namespace_user, name=name)
        except Repository.DoesNotExist:
            logger.error(
                "Got integrity error when trying to create repository %s/%s: %s",
                namespace,
                name,
                ree.internal_exception,
            )
            return None

    # Apply default permissions (only occurs for repositories under organizations)
    if creating_user and not creating_user.organization and creating_user.username != namespace:
        permission.apply_default_permissions(repo, creating_user)

    return repo
def _delete_user_linked_data(user):
    """Remove all non-repository rows referencing `user`, ahead of namespace GC."""
    if user.organization:
        # Delete the organization's teams.
        with db_transaction():
            for org_team in Team.select().where(Team.organization == user):
                org_team.delete_instance(recursive=True)

        # Delete any OAuth approvals and tokens associated with the user.
        with db_transaction():
            for oauth_app in OAuthApplication.select().where(
                    OAuthApplication.organization == user):
                oauth_app.delete_instance(recursive=True)
    else:
        # Remove the user from any teams in which they are a member.
        TeamMember.delete().where(TeamMember.user == user).execute()

    # Delete any repository buildtriggers where the user is the connected user.
    with db_transaction():
        for trigger in RepositoryBuildTrigger.select().where(
                RepositoryBuildTrigger.connected_user == user):
            trigger.delete_instance(recursive=True, delete_nullable=False)

    # Delete any mirrors with robots owned by this user.
    with db_transaction():
        owned_robots = list(list_namespace_robots(user.username))
        RepoMirrorConfig.delete().where(
            RepoMirrorConfig.internal_robot << owned_robots).execute()

    # Delete any robots owned by this user.
    with db_transaction():
        owned_robots = list(list_namespace_robots(user.username))
        for robot in owned_robots:
            robot.delete_instance(recursive=True, delete_nullable=True)

    # Null out any service key approvals. We technically lose information
    # here, but it's better than failing, and only occurs if a superuser is
    # being deleted.
    ServiceKeyApproval.update(approver=None).where(
        ServiceKeyApproval.approver == user).execute()

    # Delete any federated user links.
    FederatedLogin.delete().where(FederatedLogin.user == user).execute()
def update_service_key(kid, name=None, metadata=None):
    """Update the name and/or metadata of the service key with the given kid.

    Raises ServiceKeyDoesNotExist when no such key exists.
    """
    try:
        with db_transaction():
            # Lock the key row before mutating it.
            service_key = db_for_update(
                ServiceKey.select().where(ServiceKey.kid == kid)).get()

            if name is not None:
                service_key.name = name
            if metadata is not None:
                # Merge rather than replace the existing metadata.
                service_key.metadata.update(metadata)

            service_key.save()
    except ServiceKey.DoesNotExist:
        raise ServiceKeyDoesNotExist
def set_image_metadata(
    docker_image_id,
    namespace_name,
    repository_name,
    created_date_str,
    comment,
    command,
    v1_json_metadata,
    parent=None,
):
    """
    Sets metadata that is specific to how a binary piece of storage fits into the layer tree.

    Raises DataModelException when no matching image exists; returns the
    updated Image row.
    """
    with db_transaction():
        try:
            fetched = (
                Image.select(Image, ImageStorage)
                .join(Repository)
                .join(Namespace, on=(Repository.namespace_user == Namespace.id))
                .switch(Image)
                .join(ImageStorage)
                .where(
                    Repository.name == repository_name,
                    Namespace.username == namespace_name,
                    Image.docker_image_id == docker_image_id,
                )
                .get()
            )
        except Image.DoesNotExist:
            raise DataModelException("No image with specified id and repository")

        fetched.created = datetime.now()
        if created_date_str is not None:
            try:
                fetched.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
            except Exception:
                # Fix: narrowed from a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. parse raises different
                # exceptions (ValueError, OverflowError, TypeError), so we
                # cannot use a more specific handler here; fall back to now().
                pass

        # We cleanup any old checksum in case it's a retry after a fail
        fetched.v1_checksum = None

        fetched.comment = comment
        fetched.command = command
        fetched.v1_json_metadata = v1_json_metadata

        # Ancestry string is the parent's ancestry plus the parent's own id.
        if parent:
            fetched.ancestors = "%s%s/" % (parent.ancestors, parent.id)
            fetched.parent = parent

        fetched.save()
        return fetched
def ensure_image_locations(*names):
    """Insert an ImageStorageLocation row for each of `names` not already present."""
    with db_transaction():
        present = ImageStorageLocation.select().where(
            ImageStorageLocation.name << names)

        # Start from all requested names and strike out the ones that exist.
        missing = list(names)
        for location in present:
            missing.remove(location.name)

        if not missing:
            return

        rows = [{"name": name} for name in missing]
        ImageStorageLocation.insert_many(rows).execute()
def temp_link_blob(repository_id, blob_digest, link_expiration_s):
    """
    Temporarily links to the blob record from the given repository.

    If the blob record is not found, return None; otherwise return the
    ImageStorage row that was linked.
    """
    assert blob_digest

    with db_transaction():
        try:
            blob_storage = ImageStorage.get(content_checksum=blob_digest)
        except ImageStorage.DoesNotExist:
            return None

        _temp_link_blob(repository_id, blob_storage, link_expiration_s)
        return blob_storage
def _find_or_link_image(existing_image, repo_obj, username, translations, preferred_location):
    """Link `existing_image`'s storage into `repo_obj` as a new Image row.

    Translates the source image's ancestry into the target repository,
    records the id mapping in `translations`, and returns the new Image.
    Returns the already-present image if one exists, or None when the source
    image has disappeared.
    """
    with db_transaction():
        # Check for an existing image, under the transaction, to make sure it doesn't already exist.
        repo_image = get_repo_image(repo_obj.namespace_user.username, repo_obj.name,
                                    existing_image.docker_image_id)
        if repo_image:
            return repo_image

        # Make sure the existing base image still exists.
        try:
            to_copy = Image.select().join(ImageStorage).where(
                Image.id == existing_image.id).get()

            msg = "Linking image to existing storage with docker id: %s and uuid: %s"
            logger.debug(msg, existing_image.docker_image_id, to_copy.storage.uuid)

            # Rewrite the slash-separated ancestor id chain into ids valid
            # for the target repository.
            new_image_ancestry = __translate_ancestry(to_copy.ancestors, translations, repo_obj,
                                                      username, preferred_location)

            copied_storage = to_copy.storage

            # "/" means no ancestors; otherwise the parent is the last id in
            # the translated chain (the element before the trailing slash).
            translated_parent_id = None
            if new_image_ancestry != "/":
                translated_parent_id = int(new_image_ancestry.split("/")[-2])

            new_image = Image.create(
                docker_image_id=existing_image.docker_image_id,
                repository=repo_obj,
                storage=copied_storage,
                ancestors=new_image_ancestry,
                command=existing_image.command,
                created=existing_image.created,
                comment=existing_image.comment,
                v1_json_metadata=existing_image.v1_json_metadata,
                aggregate_size=existing_image.aggregate_size,
                parent=translated_parent_id,
                v1_checksum=existing_image.v1_checksum,
            )

            logger.debug("Storing translation %s -> %s", existing_image.id, new_image.id)
            translations[existing_image.id] = new_image.id
            return new_image
        except Image.DoesNotExist:
            return None
def _garbage_collect_label(label_id, context):
    """Delete the label row if it is no longer referenced.

    Returns True when exactly one row was deleted, False otherwise.
    """
    assert label_id is not None

    with db_transaction():
        # Re-check references inside the transaction to avoid racing writers.
        if _check_label_used(label_id):
            return False

        deleted = Label.delete().where(Label.id == label_id).execute() == 1
        if deleted:
            context.mark_label_id_removed(label_id)
        return deleted
def replace_service_key(old_kid, kid, jwk, metadata, expiration_date):
    """Atomically replace the service key `old_kid` with a new key `kid`.

    The replacement inherits the old key's name, service, rotation duration
    and approval; metadata is merged. Raises ServiceKeyDoesNotExist when the
    old key cannot be found.
    """
    try:
        with db_transaction():
            old_key = db_for_update(
                ServiceKey.select().where(ServiceKey.kid == old_kid)).get()
            old_key.metadata.update(metadata)

            ServiceKey.create(
                name=old_key.name,
                kid=kid,
                service=old_key.service,
                jwk=jwk,
                metadata=old_key.metadata,
                expiration_date=expiration_date,
                rotation_duration=old_key.rotation_duration,
                approval=old_key.approval)
            old_key.delete_instance()
    except ServiceKey.DoesNotExist:
        raise ServiceKeyDoesNotExist

    # Post-replacement bookkeeping: notify, clear stale approval
    # notifications, and sweep expired keys for the service.
    _notify_superusers(old_key)
    delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(old_kid))
    _gc_expired(old_key.service)
def approve_service_key(kid, approval_type, approver=None, notes=''):
    """Attach an approval to the service key with the given kid.

    Raises ServiceKeyAlreadyApproved when the key already has an approval,
    ServiceKeyDoesNotExist when the key is missing. Returns the key.
    """
    try:
        with db_transaction():
            key = db_for_update(
                ServiceKey.select().where(ServiceKey.kid == kid)).get()
            if key.approval is not None:
                raise ServiceKeyAlreadyApproved

            key.approval = ServiceKeyApproval.create(
                approver=approver, approval_type=approval_type, notes=notes)
            key.save()
    except ServiceKey.DoesNotExist:
        raise ServiceKeyDoesNotExist

    # The pending-approval notification is now obsolete.
    delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(kid))
    return key
def ensure_blob_locations(models_ref, *names):
    """Insert a BlobPlacementLocation row for each of `names` not already present."""
    BlobPlacementLocation = models_ref.BlobPlacementLocation

    with db_transaction():
        present = BlobPlacementLocation.select().where(
            BlobPlacementLocation.name << names)

        # Start from all requested names and strike out the ones that exist.
        missing = list(names)
        for location in present:
            missing.remove(location.name)

        if not missing:
            return

        rows = [{"name": name} for name in missing]
        BlobPlacementLocation.insert_many(rows).execute()
def store_blob_record_and_temp_link_in_repo(
    repository_id,
    blob_digest,
    location_obj,
    byte_count,
    link_expiration_s,
    uncompressed_byte_count=None,
):
    """
    Store a record of the blob and temporarily link it to the specified repository.

    Reuses an existing ImageStorage row for the digest when present (filling
    in any missing size fields and ensuring a placement row exists);
    otherwise creates the storage and placement rows. Returns the storage.
    """
    assert blob_digest
    assert byte_count is not None

    with db_transaction():
        try:
            storage = ImageStorage.get(content_checksum=blob_digest)
            save_changes = False

            # Backfill size fields that were unknown when the row was created.
            if storage.image_size is None:
                storage.image_size = byte_count
                save_changes = True

            if storage.uncompressed_size is None and uncompressed_byte_count is not None:
                storage.uncompressed_size = uncompressed_byte_count
                save_changes = True

            if save_changes:
                storage.save()

            # Raises ImageStoragePlacement.DoesNotExist (handled below) when
            # the storage has no placement at this location yet.
            ImageStoragePlacement.get(storage=storage, location=location_obj)
        except ImageStorage.DoesNotExist:
            # No storage for this digest at all: create both rows.
            storage = ImageStorage.create(
                content_checksum=blob_digest,
                uploading=False,
                image_size=byte_count,
                uncompressed_size=uncompressed_byte_count,
            )
            ImageStoragePlacement.create(storage=storage, location=location_obj)
        except ImageStoragePlacement.DoesNotExist:
            # Storage exists but is not placed at this location yet.
            ImageStoragePlacement.create(storage=storage, location=location_obj)

        _temp_link_blob(repository_id, storage, link_expiration_s)
        return storage
def update_trigger_disable_status(trigger, final_phase):
    """
    Updates the disable status of the given build trigger. If the build trigger had a failure,
    then the counter is increased and, if we've reached the limit, the trigger is automatically
    disabled. Otherwise, if the trigger succeeded, it's counter is reset. This ensures that
    triggers that continue to error are eventually automatically disabled.
    """
    with db_transaction():
        # Reload the trigger; it may have been deleted in the meantime.
        try:
            trigger = RepositoryBuildTrigger.get(id=trigger.id)
        except RepositoryBuildTrigger.DoesNotExist:
            # Already deleted.
            return

        # A successful build resets both successive counters.
        if final_phase == BUILD_PHASE.COMPLETE:
            trigger.successive_failure_count = 0
            trigger.successive_internal_error_count = 0
            trigger.save()
            return

        # Otherwise, bump the counter matching the kind of failure.
        if final_phase == BUILD_PHASE.ERROR:
            trigger.successive_failure_count += 1
            trigger.successive_internal_error_count = 0
        elif final_phase == BUILD_PHASE.INTERNAL_ERROR:
            trigger.successive_internal_error_count += 1

        failure_limit = config.app_config.get(
            "SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD")
        internal_error_limit = config.app_config.get(
            "SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD")

        # NOTE(review): BUILD_FALURES spelling follows the enum member as
        # declared elsewhere in the project — confirm before "fixing" it.
        if failure_limit and trigger.successive_failure_count >= failure_limit:
            toggle_build_trigger(trigger, False, TRIGGER_DISABLE_REASON.BUILD_FALURES)
        elif (internal_error_limit and
              trigger.successive_internal_error_count >= internal_error_limit):
            toggle_build_trigger(trigger, False, TRIGGER_DISABLE_REASON.INTERNAL_ERRORS)
        else:
            # No threshold crossed; just persist the updated counters.
            trigger.save()
def _garbage_collect_legacy_manifest(legacy_manifest_id, context): assert legacy_manifest_id is not None # Add the labels to be GCed. query = TagManifestLabel.select().where( TagManifestLabel.annotated == legacy_manifest_id) for manifest_label in query: context.add_label_id(manifest_label.label_id) # Delete the tag manifest. with db_transaction(): try: tag_manifest = TagManifest.select().where( TagManifest.id == legacy_manifest_id).get() except TagManifest.DoesNotExist: return False assert tag_manifest.id == legacy_manifest_id assert tag_manifest.tag.repository_id == context.repository.id # Delete any label mapping rows. (TagManifestLabelMap.delete().where( TagManifestLabelMap.tag_manifest == legacy_manifest_id).execute()) # Delete the label rows. TagManifestLabel.delete().where( TagManifestLabel.annotated == legacy_manifest_id).execute() # Delete the mapping row if it exists. try: tmt = (TagManifestToManifest.select().where( TagManifestToManifest.tag_manifest == tag_manifest).get()) context.add_manifest_id(tmt.manifest_id) tmt_deleted = tmt.delete_instance() if tmt_deleted: gc_table_rows_deleted.labels( table="TagManifestToManifest").inc() except TagManifestToManifest.DoesNotExist: pass # Delete the tag manifest. tag_manifest_deleted = tag_manifest.delete_instance() if tag_manifest_deleted: gc_table_rows_deleted.labels(table="TagManifest").inc() return True
def _check_label_used(label_id):
    """Return True if the label row is still referenced by any manifest label."""
    assert label_id is not None

    def _any_row(query, missing_exc):
        # .get() raises the model's DoesNotExist when no row matches.
        try:
            query.get()
            return True
        except missing_exc:
            return False

    with db_transaction():
        # A reference from either mapping table keeps the label alive.
        if _any_row(
                ManifestLabel.select().where(ManifestLabel.label == label_id),
                ManifestLabel.DoesNotExist):
            return True
        if _any_row(
                TagManifestLabel.select().where(
                    TagManifestLabel.label == label_id),
                TagManifestLabel.DoesNotExist):
            return True

    return False