Example #1
def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
    # Ensure the digest is present before proceeding.
    digest = request.args.get("digest", None)
    if digest is None:
        raise BlobUploadInvalid(detail={"reason": "Missing digest arg on monolithic upload"})

    # Find the upload.
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
        raise NameUnknown()

    uploader = retrieve_blob_upload_manager(
        repository_ref, upload_uuid, storage, _upload_settings()
    )
    if uploader is None:
        raise BlobUploadUnknown()

    # Upload the chunk for the blob and commit it once complete.
    with complete_when_uploaded(uploader):
        _upload_chunk(uploader, digest)

    # Write the response to the client.
    return Response(
        status=201,
        headers={
            "Docker-Content-Digest": digest,
            "Location": get_app_url()
            + url_for(
                "v2.download_blob", repository="%s/%s" % (namespace_name, repo_name), digest=digest
            ),
        },
    )
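
For context, this handler serves the final PUT of the Docker Registry HTTP API V2 blob upload flow. A minimal client-side sketch of that flow (the registry host and repository are placeholders, and we assume the registry returns an absolute Location header, as this handler does):

import hashlib
import requests

blob = b"example layer contents"
digest = "sha256:" + hashlib.sha256(blob).hexdigest()

# Start an upload session; the registry answers 202 with the upload URL.
start = requests.post("https://registry.example.com/v2/myorg/myrepo/blobs/uploads/")
upload_url = start.headers["Location"]

# Finish it monolithically: PUT the whole blob with ?digest=<digest>.
resp = requests.put(
    upload_url,
    params={"digest": digest},
    data=blob,
    headers={"Content-Type": "application/octet-stream"},
)
assert resp.status_code == 201
print(resp.headers["Docker-Content-Digest"], resp.headers["Location"])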
Example #2
def _upload_chunk(blob_uploader, commit_digest=None):
    """
    Performs uploading of a chunk of data in the current request's stream, via the blob uploader
    given.

    If commit_digest is specified, the upload is committed to a blob once the stream's data has been
    read and stored.
    """
    start_offset, length = _start_offset_and_length(request.headers.get("content-range"))
    if None in {start_offset, length}:
        raise InvalidRequest(message="Invalid range header")

    input_fp = get_input_stream(request)

    try:
        # Upload the data received.
        blob_uploader.upload_chunk(app.config, input_fp, start_offset, length)

        if commit_digest is not None:
            # Commit the upload to a blob.
            return blob_uploader.commit_to_blob(app.config, commit_digest)
    except BlobTooLargeException as ble:
        raise LayerTooLarge(uploaded=ble.uploaded, max_allowed=ble.max_allowed)
    except BlobRangeMismatchException:
        logger.exception("Exception when uploading blob to %s", blob_uploader.blob_upload_id)
        _abort_range_not_satisfiable(
            blob_uploader.blob_upload.byte_count, blob_uploader.blob_upload_id
        )
    except BlobUploadException:
        logger.exception("Exception when uploading blob to %s", blob_uploader.blob_upload_id)
        raise BlobUploadInvalid()
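
_start_offset_and_length is referenced above but not shown in these examples. A minimal sketch of what such a parser might look like, assuming a Content-Range value of the form <start>-<end>; here (0, -1) stands for "no header given, read the whole stream" and (None, None) signals a malformed header, which is what the None check above rejects:

import re

_RANGE_HEADER = re.compile(r"^([0-9]+)-([0-9]+)$")

def _start_offset_and_length(range_header):
    # No header: the upload starts at offset 0 and runs to the end of the stream.
    if range_header is None:
        return 0, -1

    # Malformed header: signal the caller to reject the request.
    match = _RANGE_HEADER.match(range_header)
    if match is None:
        return None, None

    start_offset = int(match.group(1))
    length = int(match.group(2)) - start_offset + 1
    return start_offset, length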
Example #3
def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
    # Ensure the digest is present before proceeding.
    digest = request.args.get("digest", None)
    if digest is None:
        raise BlobUploadInvalid(detail={"reason": "Missing digest arg on monolithic upload"})

    # Find the upload.
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
        raise NameUnknown("repository not found")

    if app.config.get("FEATURE_QUOTA_MANAGEMENT", False):
        quota = namespacequota.verify_namespace_quota_during_upload(repository_ref)
        if quota["severity_level"] == "Reject":
            namespacequota.notify_organization_admins(
                repository_ref, "quota_error", {"severity": "Reject"}
            )
            raise QuotaExceeded()

    uploader = retrieve_blob_upload_manager(
        repository_ref, upload_uuid, storage, _upload_settings()
    )
    if uploader is None:
        raise BlobUploadUnknown()

    # Upload the chunk for the blob and commit it once complete.
    with complete_when_uploaded(uploader):
        _upload_chunk(uploader, digest)

    # Write the response to the client.
    return Response(
        status=201,
        headers={
            "Docker-Content-Digest": digest,
            "Location": get_app_url()
            + url_for(
                "v2.download_blob", repository="%s/%s" % (namespace_name, repo_name), digest=digest
            ),
        },
    )
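
complete_when_uploaded, used by both endpoint examples, is also not shown. A plausible sketch of such a context manager, assuming the upload manager exposes a committed_blob attribute and a cancel_upload() method (both interface assumptions, not confirmed by these examples):

from contextlib import contextmanager

@contextmanager
def complete_when_uploaded(uploader):
    try:
        yield uploader
    finally:
        # If _upload_chunk never committed the upload to a blob (error path,
        # or no digest supplied), release its storage instead of leaking it.
        if uploader.committed_blob is None:  # assumed attribute
            uploader.cancel_upload()  # assumed method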