Example #1
0
    def get(self, request, organization):
        """
        Return chunk upload parameters
        ``````````````````````````````
        :auth: required
        """
        endpoint = options.get("system.upload-url-prefix")
        # Fall back to the default system URL if no upload prefix is
        # configured.  A truthiness check also tolerates a None option value,
        # which `len(endpoint) == 0` would crash on.
        if not endpoint:
            endpoint = options.get("system.url-prefix")

        # Join the configured prefix with the org-specific upload route,
        # normalizing slashes so exactly one separates the two parts.
        url = reverse("sentry-api-0-chunk-upload", args=[organization.slug])
        endpoint = urljoin(endpoint.rstrip("/") + "/", url.lstrip("/"))

        # Advertise the upload contract (chunking limits, hashing, accepted
        # compression/content types) so clients can slice files accordingly.
        return Response({
            "url": endpoint,
            "chunkSize": settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE,
            "chunksPerRequest": MAX_CHUNKS_PER_REQUEST,
            "maxFileSize": get_max_file_size(organization),
            "maxRequestSize": MAX_REQUEST_SIZE,
            "concurrency": MAX_CONCURRENCY,
            "hashAlgorithm": HASH_ALGORITHM,
            "compression": ["gzip"],
            "accept": CHUNK_UPLOAD_ACCEPT,
        })
Example #2
0
    def get(self, request, organization):
        """
        Return chunk upload parameters
        ``````````````````````````````
        :auth: required
        """
        endpoint = options.get('system.upload-url-prefix')
        # Fall back to the default system URL if no upload prefix is
        # configured.  A truthiness check also tolerates a None option value,
        # which `len(endpoint) == 0` would crash on.
        if not endpoint:
            endpoint = options.get('system.url-prefix')

        # Join the configured prefix with the org-specific upload route,
        # normalizing slashes so exactly one separates the two parts.
        url = reverse('sentry-api-0-chunk-upload', args=[organization.slug])
        endpoint = urljoin(endpoint.rstrip('/') + '/', url.lstrip('/'))

        # Advertise the upload contract (chunking limits, hashing, accepted
        # compression/content types) so clients can slice files accordingly.
        return Response({
            'url': endpoint,
            'chunkSize': CHUNK_UPLOAD_BLOB_SIZE,
            'chunksPerRequest': MAX_CHUNKS_PER_REQUEST,
            'maxFileSize': get_max_file_size(organization),
            'maxRequestSize': MAX_REQUEST_SIZE,
            'concurrency': MAX_CONCURRENCY,
            'hashAlgorithm': HASH_ALGORITHM,
            'compression': ['gzip'],
            'accept': CHUNK_UPLOAD_ACCEPT,
        })
Example #3
0
    def get(self, request, organization):
        """
        Return chunk upload parameters
        ``````````````````````````````
        :auth: required
        """
        endpoint = options.get('system.upload-url-prefix')
        # Fall back to the default system URL if no upload prefix is
        # configured.  A truthiness check also tolerates a None option value,
        # which `len(endpoint) == 0` would crash on.
        if not endpoint:
            endpoint = options.get('system.url-prefix')

        # Join the configured prefix with the org-specific upload route,
        # normalizing slashes so exactly one separates the two parts.
        url = reverse('sentry-api-0-chunk-upload', args=[organization.slug])
        endpoint = urljoin(endpoint.rstrip('/') + '/', url.lstrip('/'))

        # Advertise the upload contract (chunking limits, hashing, accepted
        # compression) so clients can slice files accordingly.
        return Response(
            {
                'url': endpoint,
                'chunkSize': CHUNK_UPLOAD_BLOB_SIZE,
                'chunksPerRequest': MAX_CHUNKS_PER_REQUEST,
                'maxFileSize': get_max_file_size(organization),
                'maxRequestSize': MAX_REQUEST_SIZE,
                'concurrency': MAX_CONCURRENCY,
                'hashAlgorithm': HASH_ALGORITHM,
                'compression': ['gzip'],
            }
        )
Example #4
0
def assemble_file(task, org_or_project, name, checksum, chunks, file_type):
    """
    Verifies and assembles a file model from chunks.

    This downloads all chunks from blob store to verify their integrity and
    associates them with a created file model. Additionally, it assembles the
    full file in a temporary location and verifies the complete content hash.

    Returns a tuple ``(File, TempFile)`` on success, or ``None`` on error.
    """
    from sentry.models import File, AssembleChecksumMismatch, FileBlob, Project
    # NOTE(review): set_assemble_status, ChunkFileState and
    # get_max_file_size appear to come from module-level imports not shown
    # in this chunk — confirm against the full file.

    # Accept either a Project or an Organization; size limits are always
    # enforced per organization.
    if isinstance(org_or_project, Project):
        organization = org_or_project.organization
    else:
        organization = org_or_project

    # Load all FileBlobs from the database; at this point we already own all
    # chunks needed to build the file.
    file_blobs = FileBlob.objects.filter(
        checksum__in=chunks
    ).values_list('id', 'checksum', 'size')

    # Reject files that exceed the maximum allowed size for this
    # organization.
    file_size = sum(size for _, _, size in file_blobs)
    if file_size > get_max_file_size(organization):
        set_assemble_status(task, org_or_project.id, checksum, ChunkFileState.ERROR,
                            detail='File exceeds maximum size')
        return

    # Sanity check.  In case not all blobs exist at this point we have a
    # race condition.
    if {blob_checksum for _, blob_checksum, _ in file_blobs} != set(chunks):
        set_assemble_status(task, org_or_project.id, checksum, ChunkFileState.ERROR,
                            detail='Not all chunks available for assembling')
        return

    # Ensure blobs are in the order and duplication in which they were
    # transmitted. Otherwise, we would assemble the file in the wrong order.
    # (Avoid shadowing the builtin `id` in the comprehension.)
    ids_by_checksum = {blob_checksum: blob_id
                       for blob_id, blob_checksum, _ in file_blobs}
    file_blob_ids = [ids_by_checksum[c] for c in chunks]

    file = File.objects.create(
        name=name,
        checksum=checksum,
        type=file_type,
    )
    try:
        temp_file = file.assemble_from_file_blob_ids(file_blob_ids, checksum)
    except AssembleChecksumMismatch:
        # Assembled content did not hash to the reported checksum: discard
        # the partially created file model and report the error.
        file.delete()
        set_assemble_status(task, org_or_project.id, checksum, ChunkFileState.ERROR,
                            detail='Reported checksum mismatch')
    else:
        file.save()
        return file, temp_file
Example #5
0
def assemble_file(project, name, checksum, chunks, file_type):
    """Assemble multiple uploaded chunks into one File.

    Verifies that all referenced chunks exist and that the assembled content
    matches ``checksum``.  Returns ``(File, temp_file)`` on success, or
    ``None`` after recording an error status via ``set_assemble_status``.
    """
    from sentry.models import File, ChunkFileState, AssembleChecksumMismatch, \
        FileBlob, set_assemble_status

    # Load all FileBlobs from db since we can be sure here we already own all
    # chunks need to build the file
    file_blobs = FileBlob.objects.filter(checksum__in=chunks).values_list(
        'id', 'checksum', 'size')

    # Reject all files that exceed the maximum allowed size for this
    # organization.
    file_size = sum(size for _, _, size in file_blobs)
    if file_size > get_max_file_size(project.organization):
        set_assemble_status(project,
                            checksum,
                            ChunkFileState.ERROR,
                            detail='File exceeds maximum size')
        return

    # Sanity check.  In case not all blobs exist at this point we have a
    # race condition.  This must run before we index into `chunks` below.
    if {blob_checksum for _, blob_checksum, _ in file_blobs} != set(chunks):
        set_assemble_status(project,
                            checksum,
                            ChunkFileState.ERROR,
                            detail='Not all chunks available for assembling')
        return

    # Re-establish the order *and duplication* in which the chunks were
    # transmitted.  Sorting the (unique) blob rows by `chunks.index(...)`
    # would drop repeated chunks, producing a corrupt file whenever the same
    # blob appears more than once; index by `chunks` instead.
    ids_by_checksum = {blob_checksum: blob_id
                       for blob_id, blob_checksum, _ in file_blobs}
    file_blob_ids = [ids_by_checksum[c] for c in chunks]

    file = File.objects.create(
        name=name,
        checksum=checksum,
        type=file_type,
    )
    try:
        temp_file = file.assemble_from_file_blob_ids(file_blob_ids, checksum)
    except AssembleChecksumMismatch:
        # Assembled content did not hash to the reported checksum: discard
        # the partially created file model and report the error.
        file.delete()
        set_assemble_status(project,
                            checksum,
                            ChunkFileState.ERROR,
                            detail='Reported checksum mismatch')
    else:
        file.save()
        return file, temp_file
Example #6
0
def assemble_file(project, name, checksum, chunks, file_type):
    """Assemble multiple uploaded chunks into one File.

    Verifies that all referenced chunks exist and that the assembled content
    matches ``checksum``.  Returns ``(File, temp_file)`` on success, or
    ``None`` after recording an error status via ``set_assemble_status``.
    """
    from sentry.models import File, ChunkFileState, AssembleChecksumMismatch, \
        FileBlob, set_assemble_status

    # Load all FileBlobs from db since we can be sure here we already own all
    # chunks need to build the file
    file_blobs = FileBlob.objects.filter(
        checksum__in=chunks
    ).values_list('id', 'checksum', 'size')

    # Reject all files that exceed the maximum allowed size for this
    # organization.
    file_size = sum(size for _, _, size in file_blobs)
    if file_size > get_max_file_size(project.organization):
        set_assemble_status(project, checksum, ChunkFileState.ERROR,
                            detail='File exceeds maximum size')
        return

    # Sanity check.  In case not all blobs exist at this point we have a
    # race condition.  This must run before we index into `chunks` below.
    if {blob_checksum for _, blob_checksum, _ in file_blobs} != set(chunks):
        set_assemble_status(project, checksum, ChunkFileState.ERROR,
                            detail='Not all chunks available for assembling')
        return

    # Re-establish the order *and duplication* in which the chunks were
    # transmitted.  Sorting the (unique) blob rows by `chunks.index(...)`
    # would drop repeated chunks, producing a corrupt file whenever the same
    # blob appears more than once; index by `chunks` instead.
    ids_by_checksum = {blob_checksum: blob_id
                       for blob_id, blob_checksum, _ in file_blobs}
    file_blob_ids = [ids_by_checksum[c] for c in chunks]

    file = File.objects.create(
        name=name,
        checksum=checksum,
        type=file_type,
    )
    try:
        temp_file = file.assemble_from_file_blob_ids(file_blob_ids, checksum)
    except AssembleChecksumMismatch:
        # Assembled content did not hash to the reported checksum: discard
        # the partially created file model and report the error.
        file.delete()
        set_assemble_status(project, checksum, ChunkFileState.ERROR,
                            detail='Reported checksum mismatch')
    else:
        file.save()
        return file, temp_file
Example #7
0
    def get(self, request: Request, organization) -> Response:
        """
        Return chunk upload parameters
        ``````````````````````````````
        :auth: required
        """
        endpoint = options.get("system.upload-url-prefix")
        relative_url = reverse("sentry-api-0-chunk-upload",
                               args=[organization.slug])

        # Starting with sentry-cli 1.70.1, relative chunk-upload URLs are
        # supported.  Example User-Agent: sentry-cli/1.70.1
        user_agent = request.headers.get("User-Agent", "")
        sentrycli_version = SENTRYCLI_SEMVER_RE.search(user_agent)
        # Compare the whole version as a (major, minor, patch) tuple.
        # Comparing each component independently would wrongly reject
        # newer versions such as 2.0.0 (minor 0 < 70) or 1.71.0 (patch 0 < 1).
        supports_relative_url = sentrycli_version is not None and (
            int(sentrycli_version.group("major")),
            int(sentrycli_version.group("minor")),
            int(sentrycli_version.group("patch")),
        ) >= (1, 70, 1)

        # If the user has not overridden the upload URL prefix...
        if not endpoint:
            if supports_relative_url:
                # ...and the client supports relative uploads, return a
                # relative, versionless endpoint (with `/api/0` stripped).
                # NB: str.lstrip() strips a *character set*, not a prefix,
                # so it must not be used here — remove the exact prefix.
                if relative_url.startswith(API_PREFIX):
                    url = relative_url[len(API_PREFIX):]
                else:
                    url = relative_url
            else:
                # ...otherwise return an absolute, versioned endpoint with
                # the default, system-wide prefix.
                endpoint = options.get("system.url-prefix")
                url = urljoin(
                    endpoint.rstrip("/") + "/", relative_url.lstrip("/"))
        else:
            # The user overrode the upload URL prefix: always return an
            # absolute, versioned endpoint with the user-configured prefix.
            url = urljoin(endpoint.rstrip("/") + "/", relative_url.lstrip("/"))

        # Advertise the upload contract (chunking limits, hashing, accepted
        # compression/content types) so clients can slice files accordingly.
        return Response({
            "url": url,
            "chunkSize": settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE,
            "chunksPerRequest": MAX_CHUNKS_PER_REQUEST,
            "maxFileSize": get_max_file_size(organization),
            "maxRequestSize": MAX_REQUEST_SIZE,
            "concurrency": MAX_CONCURRENCY,
            "hashAlgorithm": HASH_ALGORITHM,
            "compression": ["gzip"],
            "accept": CHUNK_UPLOAD_ACCEPT,
        })