def get_stale_blob_upload(stale_timespan):
    """
    Returns a random blob upload which was created before the stale timespan.
    """
    stale_threshold = datetime.now() - stale_timespan

    try:
        candidates = (
            BlobUpload.select()
            .where(BlobUpload.created <= stale_threshold)
            .limit(500)
            .distinct()
            .alias("candidates")
        )

        found = (
            BlobUpload.select(candidates.c.id).from_(candidates).order_by(db_random_func()).get()
        )
        if not found:
            return None

        return (
            BlobUpload.select(BlobUpload, ImageStorageLocation)
            .join(ImageStorageLocation)
            .where(BlobUpload.id == found.id)
            .get()
        )
    except BlobUpload.DoesNotExist:
        return None
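
# Usage sketch (illustrative, not part of the original module): a cleanup worker
# could call get_stale_blob_upload in a loop to reap abandoned uploads. The
# two-day default threshold and the caller-supplied `cancel_upload` callable are
# assumptions for the example; the real cleanup path depends on the storage
# driver in use.
from datetime import timedelta


def _example_reap_stale_uploads(cancel_upload, stale_timespan=timedelta(days=2)):
    """
    Repeatedly fetches stale uploads, hands each to `cancel_upload` so the
    caller can discard any partial upload state in storage, then deletes the
    tracking row.
    """
    while True:
        stale_upload = get_stale_blob_upload(stale_timespan)
        if stale_upload is None:
            return

        cancel_upload(stale_upload)
        stale_upload.delete_instance()
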

def initiate_upload_for_repo(repo, uuid, location_name, storage_metadata):
    """
    Initiates a blob upload for a specific repository object, in a specific location.
    """
    location = storage_model.get_image_location_for_name(location_name)
    return BlobUpload.create(
        repository=repo, location=location.id, uuid=uuid, storage_metadata=storage_metadata
    )
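
# Usage sketch (illustrative): a registry frontend handling
# `POST /v2/<repo>/blobs/uploads/` might create the tracking row like this.
# The "local_us" location name and the empty storage metadata are placeholder
# assumptions for the example.
from uuid import uuid4


def _example_start_blob_upload(repo):
    upload_uuid = str(uuid4())
    upload = initiate_upload_for_repo(repo, upload_uuid, "local_us", storage_metadata={})
    return upload.uuid
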

def get_blob_upload_by_uuid(upload_uuid):
    """
    Loads the upload with the given UUID, if any.
    """
    try:
        return BlobUpload.select().where(BlobUpload.uuid == upload_uuid).get()
    except BlobUpload.DoesNotExist:
        return None

def get_blob_upload(namespace, repo_name, upload_uuid):
    """
    Load the upload which is already in progress.
    """
    try:
        return (
            BlobUpload.select(BlobUpload, ImageStorageLocation)
            .join(ImageStorageLocation)
            .switch(BlobUpload)
            .join(Repository)
            .join(Namespace, on=(Namespace.id == Repository.namespace_user))
            .where(
                Repository.name == repo_name,
                Namespace.username == namespace,
                BlobUpload.uuid == upload_uuid,
            )
            .get()
        )
    except BlobUpload.DoesNotExist:
        raise InvalidBlobUpload()
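
# Usage sketch (illustrative): callers resolve an in-progress upload by
# namespace/repository/UUID and translate InvalidBlobUpload into whatever error
# surface they expose. The helper name and return shape below are placeholders.
def _example_resolve_upload(namespace_name, repo_name, upload_uuid):
    try:
        upload = get_blob_upload(namespace_name, repo_name, upload_uuid)
    except InvalidBlobUpload:
        return None

    # The eagerly joined ImageStorageLocation tells us where the partial
    # upload's chunks live.
    return (upload.uuid, upload.location.name)
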

def get_stale_blob_upload(stale_timespan):
    """
    Returns a blob upload which was created before the stale timespan.
    """
    stale_threshold = datetime.now() - stale_timespan

    try:
        candidates = (
            BlobUpload.select(BlobUpload, ImageStorageLocation)
            .join(ImageStorageLocation)
            .where(BlobUpload.created <= stale_threshold)
        )
        return candidates.get()
    except BlobUpload.DoesNotExist:
        return None

def get_size_during_upload(repo_id: int):
    # TODO: Make this one trip to the db instead of 2?
    size = get_repository_size_and_cache(repo_id)
    query = (
        BlobUpload.select(fn.Sum(BlobUpload.byte_count).alias("size_bytes"))
        .where(BlobUpload.repository_id == repo_id)
        .get()
    )

    repo_size = 0
    size_bytes = 0

    if size["repository_size"] is not None:
        repo_size = size["repository_size"]

    if query.size_bytes is not None:
        size_bytes = query.size_bytes

    return repo_size + size_bytes
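
# Worked example (illustrative): if get_repository_size_and_cache reports a
# committed repository size of 10 MiB and two in-flight uploads have received
# 1 MiB and 2 MiB so far, get_size_during_upload returns 13 MiB, i.e. committed
# blobs plus bytes already accepted for uploads still in progress. A quota
# check during upload might then be a simple comparison; the helper name and
# the byte-limit parameter are assumptions for the example.
def _example_within_quota(repo_id: int, quota_limit_bytes: int) -> bool:
    """
    Returns True when the repository, including in-progress uploads, is still
    within the given byte limit.
    """
    return get_size_during_upload(repo_id) <= quota_limit_bytes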