Example #1
0
    def test_assemble_duplicate_blobs(self):
        """Assembling a file out of eight identical chunks works.

        Uploads the same 8 MiB blob eight times, verifies that the stored
        FileBlob matches the payload and is owned by the organization, then
        assembles the full file and checks its checksum and type.
        """
        files = []
        file_checksum = sha1()
        blob = os.urandom(1024 * 1024 * 8)
        # Every chunk is the same payload, so all entries share one checksum.
        # (renamed from `hash` to avoid shadowing the builtin)
        blob_hash = sha1(blob).hexdigest()
        for _ in range(8):
            file_checksum.update(blob)
            files.append((io.BytesIO(blob), blob_hash))

        # upload all blobs
        FileBlob.from_files(files, organization=self.organization)

        # find all blobs
        for reference, checksum in files:
            blob = FileBlob.objects.get(checksum=checksum)
            ref_bytes = reference.getvalue()
            assert blob.getfile().read(len(ref_bytes)) == ref_bytes
            # .get() raises if the ownership row is missing, failing the test.
            FileBlobOwner.objects.filter(
                blob=blob, organization_id=self.organization.id).get()

        rv = assemble_file(
            AssembleTask.DIF,
            self.project,
            "testfile",
            file_checksum.hexdigest(),
            [x[1] for x in files],
            "dummy.type",
        )

        assert rv is not None
        f, tmp = rv
        assert f.checksum == file_checksum.hexdigest()
        assert f.type == "dummy.type"
Example #2
0
File: chunk.py  Project: yndxz/sentry
    def post(self, request, organization):
        """
        Upload chunks and store them as FileBlobs
        `````````````````````````````````````````
        :pparam file file: The filename should be sha1 hash of the content.
                            Also note you can add up to MAX_CHUNKS_PER_REQUEST files
                            in this request.

        :auth: required
        """
        # Create a unique instance so our logger can be decoupled from the request
        # and used in threads.
        logger = logging.getLogger('sentry.files')
        logger.info('chunkupload.start')

        # Gzip-compressed chunks are wrapped so they can be handled uniformly.
        files = request.data.getlist('file')
        files += [GzipChunk(chunk) for chunk in request.data.getlist('file_gzip')]
        if not files:
            # No files uploaded is ok
            logger.info('chunkupload.end', extra={'status': status.HTTP_200_OK})
            return Response(status=status.HTTP_200_OK)

        logger.info('chunkupload.post.files', extra={'len': len(files)})

        # Validate per-chunk size and collect the client-supplied checksums
        # (each uploaded file is named after the sha1 of its content).
        checksums = []
        size = 0
        for chunk in files:
            size += chunk.size
            if chunk.size > CHUNK_UPLOAD_BLOB_SIZE:
                logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
                return Response({'error': 'Chunk size too large'},
                                status=status.HTTP_400_BAD_REQUEST)
            checksums.append(chunk.name)

        if size > MAX_REQUEST_SIZE:
            logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
            return Response({'error': 'Request too large'},
                            status=status.HTTP_400_BAD_REQUEST)

        if len(files) > MAX_CHUNKS_PER_REQUEST:
            logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
            return Response({'error': 'Too many chunks'},
                            status=status.HTTP_400_BAD_REQUEST)

        try:
            FileBlob.from_files(izip(files, checksums),
                                organization=organization,
                                logger=logger)
        except IOError as err:
            # Storage/validation failures are reported to the client as a 400.
            logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
            return Response({'error': six.text_type(err)},
                            status=status.HTTP_400_BAD_REQUEST)

        logger.info('chunkupload.end', extra={'status': status.HTTP_200_OK})
        return Response(status=status.HTTP_200_OK)
Example #3
0
File: chunk.py  Project: Kayle009/sentry
    def post(self, request, organization):
        """
        Upload chunks and store them as FileBlobs
        `````````````````````````````````````````
        :pparam file file: The filename should be sha1 hash of the content.
                            Also note you can add up to MAX_CHUNKS_PER_REQUEST files
                            in this request.

        :auth: required
        """
        # Create a unique instance so our logger can be decoupled from the request
        # and used in threads.
        logger = logging.getLogger('sentry.files')
        logger.info('chunkupload.start')

        # Gzip-compressed chunks are wrapped so they can be handled uniformly.
        files = request.FILES.getlist('file')
        files += [GzipChunk(chunk) for chunk in request.FILES.getlist('file_gzip')]
        if not files:
            # No files uploaded is ok
            logger.info('chunkupload.end', extra={'status': status.HTTP_200_OK})
            return Response(status=status.HTTP_200_OK)

        logger.info('chunkupload.post.files', extra={'len': len(files)})

        # Validate per-chunk size and collect the client-supplied checksums
        # (each uploaded file is named after the sha1 of its content).
        checksums = []
        size = 0
        for chunk in files:
            size += chunk.size
            if chunk.size > CHUNK_UPLOAD_BLOB_SIZE:
                logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
                return Response({'error': 'Chunk size too large'},
                                status=status.HTTP_400_BAD_REQUEST)
            checksums.append(chunk.name)

        if size > MAX_REQUEST_SIZE:
            logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
            return Response({'error': 'Request too large'},
                            status=status.HTTP_400_BAD_REQUEST)

        if len(files) > MAX_CHUNKS_PER_REQUEST:
            logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
            return Response({'error': 'Too many chunks'},
                            status=status.HTTP_400_BAD_REQUEST)

        try:
            FileBlob.from_files(izip(files, checksums),
                                organization=organization,
                                logger=logger)
        except IOError as err:
            # Storage/validation failures are reported to the client as a 400.
            logger.info('chunkupload.end', extra={'status': status.HTTP_400_BAD_REQUEST})
            return Response({'error': six.text_type(err)},
                            status=status.HTTP_400_BAD_REQUEST)

        logger.info('chunkupload.end', extra={'status': status.HTTP_200_OK})
        return Response(status=status.HTTP_200_OK)
Example #4
0
    def post(self, request, organization):
        """
        Upload chunks and store them as FileBlobs
        `````````````````````````````````````````
        :pparam file file: The filename should be sha1 hash of the content.
                            Also note you can add up to MAX_CHUNKS_PER_REQUEST files
                            in this request.

        :auth: required
        """
        # Create a unique instance so our logger can be decoupled from the request
        # and used in threads.
        logger = logging.getLogger("sentry.files")
        logger.info("chunkupload.start")

        files = []
        if request.data:
            # Gzip-compressed chunks are wrapped so they can be handled uniformly.
            files = request.data.getlist("file")
            files += [GzipChunk(chunk) for chunk in request.data.getlist("file_gzip")]

        if not files:
            # No files uploaded is ok
            logger.info("chunkupload.end", extra={"status": status.HTTP_200_OK})
            return Response(status=status.HTTP_200_OK)

        logger.info("chunkupload.post.files", extra={"len": len(files)})

        # Validate per-chunk size and collect the client-supplied checksums
        # (each uploaded file is named after the sha1 of its content).
        checksums = []
        size = 0
        for chunk in files:
            size += chunk.size
            if chunk.size > settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE:
                logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
                return Response(
                    {"error": "Chunk size too large"}, status=status.HTTP_400_BAD_REQUEST
                )
            checksums.append(chunk.name)

        if size > MAX_REQUEST_SIZE:
            logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
            return Response({"error": "Request too large"}, status=status.HTTP_400_BAD_REQUEST)

        if len(files) > MAX_CHUNKS_PER_REQUEST:
            logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
            return Response({"error": "Too many chunks"}, status=status.HTTP_400_BAD_REQUEST)

        try:
            FileBlob.from_files(zip(files, checksums), organization=organization, logger=logger)
        except OSError as err:
            # Storage/validation failures are reported to the client as a 400.
            logger.info("chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST})
            return Response({"error": str(err)}, status=status.HTTP_400_BAD_REQUEST)

        logger.info("chunkupload.end", extra={"status": status.HTTP_200_OK})
        return Response(status=status.HTTP_200_OK)
Example #5
0
    def post(self, request, organization):
        """
        Upload chunks and store them as FileBlobs
        `````````````````````````````````````````
        :pparam file file: The filename should be sha1 hash of the content.
                            Also note you can add up to MAX_CHUNKS_PER_REQUEST files
                            in this request.

        :auth: required
        """
        # Gzip-compressed chunks are wrapped so they can be handled uniformly.
        files = request.FILES.getlist('file')
        files += [
            GzipChunk(chunk) for chunk in request.FILES.getlist('file_gzip')
        ]
        if not files:
            # No files uploaded is ok
            return Response(status=status.HTTP_200_OK)

        # Validate per-chunk size and collect the client-supplied checksums
        # (each uploaded file is named after the sha1 of its content).
        checksums = []
        size = 0
        for chunk in files:
            size += chunk.size
            if chunk.size > CHUNK_UPLOAD_BLOB_SIZE:
                return Response({'error': 'Chunk size too large'},
                                status=status.HTTP_400_BAD_REQUEST)
            checksums.append(chunk.name)

        if size > MAX_REQUEST_SIZE:
            return Response({'error': 'Request too large'},
                            status=status.HTTP_400_BAD_REQUEST)

        if len(files) > MAX_CHUNKS_PER_REQUEST:
            return Response({'error': 'Too many chunks'},
                            status=status.HTTP_400_BAD_REQUEST)

        try:
            FileBlob.from_files(izip(files, checksums),
                                organization=organization)
        except IOError as err:
            # Storage/validation failures are reported to the client as a 400.
            return Response({'error': six.text_type(err)},
                            status=status.HTTP_400_BAD_REQUEST)

        return Response(status=status.HTTP_200_OK)
Example #6
0
    def test_assemble_from_files(self):
        """Assembling a file out of eight distinct chunks works, twice.

        Uploads eight random 8 MiB blobs, verifies each stored FileBlob and
        its organization ownership, assembles the full file, then re-uploads
        and re-assembles to check the operation is repeatable.
        """
        files = []
        file_checksum = sha1()
        for _ in xrange(8):
            blob = os.urandom(1024 * 1024 * 8)
            # renamed from `hash` to avoid shadowing the builtin
            blob_hash = sha1(blob).hexdigest()
            file_checksum.update(blob)
            files.append((io.BytesIO(blob), blob_hash))

        # upload all blobs
        FileBlob.from_files(files, organization=self.organization)

        # find all blobs
        for reference, checksum in files:
            blob = FileBlob.objects.get(checksum=checksum)
            ref_bytes = reference.getvalue()
            assert blob.getfile().read(len(ref_bytes)) == ref_bytes
            # .get() raises if the ownership row is missing, failing the test.
            FileBlobOwner.objects.filter(
                blob=blob,
                organization=self.organization
            ).get()

        rv = assemble_file(AssembleTask.DIF,
                           self.project, 'testfile', file_checksum.hexdigest(),
                           [x[1] for x in files], 'dummy.type')

        assert rv is not None
        f, tmp = rv
        assert f.checksum == file_checksum.hexdigest()
        assert f.type == 'dummy.type'

        # upload all blobs a second time; rewind each reference first
        # (distinct loop name so it does not clobber `f` above)
        for fileobj, _ in files:
            fileobj.seek(0)
        FileBlob.from_files(files, organization=self.organization)

        # assemble a second time
        f = assemble_file(AssembleTask.DIF,
                          self.project, 'testfile', file_checksum.hexdigest(),
                          [x[1] for x in files], 'dummy.type')[0]
        assert f.checksum == file_checksum.hexdigest()
Example #7
0
    def test_assemble_from_files(self):
        """Assembling a file out of eight distinct chunks works, twice.

        Uploads eight random 8 MiB blobs, verifies each stored FileBlob and
        its organization ownership, assembles the full file, then re-uploads
        and re-assembles to check the operation is repeatable.
        """
        files = []
        file_checksum = sha1()
        for _ in xrange(8):
            blob = os.urandom(1024 * 1024 * 8)
            # renamed from `hash` to avoid shadowing the builtin
            blob_hash = sha1(blob).hexdigest()
            file_checksum.update(blob)
            files.append((io.BytesIO(blob), blob_hash))

        # upload all blobs
        FileBlob.from_files(files, organization=self.organization)

        # find all blobs
        for reference, checksum in files:
            blob = FileBlob.objects.get(checksum=checksum)
            ref_bytes = reference.getvalue()
            assert blob.getfile().read(len(ref_bytes)) == ref_bytes
            # .get() raises if the ownership row is missing, failing the test.
            FileBlobOwner.objects.filter(
                blob=blob,
                organization=self.organization
            ).get()

        rv = assemble_file(
            self.project, 'testfile', file_checksum.hexdigest(),
            [x[1] for x in files], 'dummy.type')

        assert rv is not None
        f, tmp = rv
        assert f.checksum == file_checksum.hexdigest()
        assert f.type == 'dummy.type'

        # upload all blobs a second time; rewind each reference first
        # (distinct loop name so it does not clobber `f` above)
        for fileobj, _ in files:
            fileobj.seek(0)
        FileBlob.from_files(files, organization=self.organization)

        # assemble a second time
        f = assemble_file(
            self.project, 'testfile', file_checksum.hexdigest(),
            [x[1] for x in files], 'dummy.type')[0]
        assert f.checksum == file_checksum.hexdigest()