Example no. 1
0
async def test_resumable_upload_bad_chunk_size(authorized_transport,
                                               img_stream):
    """Verify that an undersized chunk size makes the upload unusable."""
    object_name = os.path.basename(img_stream.name)
    # Build the upload with a valid chunk size, then patch in a bad one
    # **after** construction (the constructor would not accept it).
    upload = resumable_requests.ResumableUpload(
        utils.RESUMABLE_UPLOAD, _async_resumable_media.UPLOAD_CHUNK_SIZE)
    upload._chunk_size = 1024
    assert upload._chunk_size < _async_resumable_media.UPLOAD_CHUNK_SIZE
    # Kick off the upload session.
    info = {u"name": object_name}
    response = await upload.initiate(authorized_transport, img_stream,
                                     info, JPEG_CONTENT_TYPE)
    # ``initiate`` should succeed and leave the stream untouched.
    await check_initiate(response, upload, img_stream, authorized_transport,
                         info)
    # The first chunk request must fail because of the undersized chunk.
    await check_bad_chunk(upload, authorized_transport)
    # Even after restoring a valid chunk size, rewinding the stream and
    # clearing the invalid flag, the resumable session URL stays unusable.
    upload._chunk_size = _async_resumable_media.UPLOAD_CHUNK_SIZE
    img_stream.seek(0)
    upload._invalid = False
    await check_bad_chunk(upload, authorized_transport)
Example no. 2
0
async def _resumable_upload_helper(authorized_transport,
                                   stream,
                                   cleanup,
                                   checksum=None,
                                   headers=None):
    """Run a full chunked resumable upload and verify the stored content."""
    object_name = os.path.basename(stream.name)
    # Schedule deletion of the blob and confirm it does not exist yet.
    await cleanup(object_name, authorized_transport)
    await check_does_not_exist(authorized_transport, object_name)
    chunk_size = _async_resumable_media.UPLOAD_CHUNK_SIZE
    upload = resumable_requests.ResumableUpload(utils.RESUMABLE_UPLOAD,
                                                chunk_size,
                                                headers=headers,
                                                checksum=checksum)
    # Start the upload session.
    info = {u"name": object_name, u"metadata": {u"direction": u"north"}}
    response = await upload.initiate(authorized_transport, stream, info,
                                     JPEG_CONTENT_TYPE)
    # ``initiate`` should succeed and leave the stream untouched.
    await check_initiate(response, upload, stream, authorized_transport,
                         info)
    # Push the whole payload, one chunk at a time.
    chunks_sent = await transmit_chunks(upload, authorized_transport,
                                        object_name, info[u"metadata"])
    assert chunks_sent == get_num_chunks(upload.total_bytes, chunk_size)
    # Read the blob back and compare against the local bytes.
    stream.seek(0)
    expected_contents = stream.read()
    await check_content(object_name,
                        expected_contents,
                        authorized_transport,
                        headers=headers)
    # The finished upload must be tombstoned.
    await check_tombstoned(upload, authorized_transport)
Example no. 3
0
 async def test_interleave_writes(self, authorized_transport, bucket,
                                  cleanup):
     """Append data to the stream between chunk transmissions."""
     object_name = u"some-moar-stuff.bin"
     chunk_size = _async_resumable_media.UPLOAD_CHUNK_SIZE
     # Schedule deletion of the blob and confirm it does not exist yet.
     await cleanup(object_name, authorized_transport)
     await check_does_not_exist(authorized_transport, object_name)
     # Begin with exactly one chunk of data; more gets appended later.
     stream = io.BytesIO(b"Z" * chunk_size)
     upload = resumable_requests.ResumableUpload(utils.RESUMABLE_UPLOAD,
                                                 chunk_size)
     # Open-ended session: the total size is unknown up front.
     info = {u"name": object_name}
     response = await upload.initiate(
         authorized_transport,
         stream,
         info,
         BYTES_CONTENT_TYPE,
         stream_final=False,
     )
     # ``initiate`` should succeed and leave the stream untouched.
     await check_initiate(response, upload, stream, authorized_transport,
                          info)
     # The total byte count stays unset for an open-ended upload.
     assert upload.total_bytes is None
     # First full chunk.
     first = await upload.transmit_next_chunk(authorized_transport)
     await self._check_partial(upload, first, chunk_size, 1)
     # Append a second full chunk, then transmit it.
     self._add_bytes(stream, b"K" * chunk_size)
     second = await upload.transmit_next_chunk(authorized_transport)
     await self._check_partial(upload, second, chunk_size, 2)
     # Append a final partial chunk (smaller than ``chunk_size``).
     tail_size = 155
     self._add_bytes(stream, b"r" * tail_size)
     final = await upload.transmit_next_chunk(authorized_transport)
     assert upload.finished
     # The last request finalizes the upload at the true total size.
     expected_total = 2 * chunk_size + tail_size
     assert upload.bytes_uploaded == expected_total
     await check_response(
         final,
         object_name,
         actual_contents=stream.getvalue(),
         total_bytes=expected_total,
         content_type=BYTES_CONTENT_TYPE,
     )
     self._check_range_sent(final, 2 * chunk_size, expected_total - 1,
                            expected_total)
Example no. 4
0
    async def test_finish_at_chunk(self, authorized_transport, bucket,
                                   cleanup):
        """Upload a payload whose size is an exact multiple of the chunk size."""
        object_name = u"some-clean-stuff.bin"
        chunk_size = _async_resumable_media.UPLOAD_CHUNK_SIZE
        # Schedule deletion of the blob and confirm it does not exist yet.
        await cleanup(object_name, authorized_transport)
        await check_does_not_exist(authorized_transport, object_name)
        # Exactly two chunks of payload (an even multiple of ``chunk_size``).
        payload = b"ab" * chunk_size
        size = len(payload)
        stream = io.BytesIO(payload)
        upload = resumable_requests.ResumableUpload(utils.RESUMABLE_UPLOAD,
                                                    chunk_size)
        # Open-ended session: the total size is unknown up front.
        info = {u"name": object_name}
        response = await upload.initiate(
            authorized_transport,
            stream,
            info,
            BYTES_CONTENT_TYPE,
            stream_final=False,
        )
        # ``initiate`` should succeed and leave the stream untouched.
        await check_initiate(response, upload, stream, authorized_transport,
                             info)
        # The total byte count stays unset for an open-ended upload.
        assert upload.total_bytes is None
        # Two full chunks, then a final (empty) request to close it out.
        first = await upload.transmit_next_chunk(authorized_transport)
        await self._check_partial(upload, first, chunk_size, 1)

        second = await upload.transmit_next_chunk(authorized_transport)
        await self._check_partial(upload, second, chunk_size, 2)

        final = await upload.transmit_next_chunk(authorized_transport)
        assert upload.finished
        # All bytes were already sent by the two chunk requests.
        assert upload.bytes_uploaded == 2 * chunk_size
        await check_response(
            final,
            object_name,
            actual_contents=payload,
            total_bytes=size,
            content_type=BYTES_CONTENT_TYPE,
        )
        # The finalizing request carries no byte range at all.
        self._check_range_sent(final, None, None, 2 * chunk_size)
Esempio n. 5
0
async def _resumable_upload_recover_helper(authorized_transport,
                                           cleanup,
                                           headers=None):
    """Sabotage an in-progress upload, recover it and finish the transfer."""
    object_name = u"some-bytes.bin"
    chunk_size = _async_resumable_media.UPLOAD_CHUNK_SIZE
    payload = b"123" * chunk_size  # Exactly three chunks of data.
    # Schedule deletion of the blob and confirm it does not exist yet.
    await cleanup(object_name, authorized_transport)
    await check_does_not_exist(authorized_transport, object_name)
    upload = resumable_requests.ResumableUpload(utils.RESUMABLE_UPLOAD,
                                                chunk_size,
                                                headers=headers)
    # Start the upload session.
    info = {u"name": object_name}
    stream = io.BytesIO(payload)
    response = await upload.initiate(authorized_transport, stream, info,
                                     BYTES_CONTENT_TYPE)
    # ``initiate`` should succeed and leave the stream untouched.
    await check_initiate(response, upload, stream, authorized_transport,
                         info)
    # Send the first chunk; the server acknowledges with a 308.
    response = await upload.transmit_next_chunk(authorized_transport)
    assert response.status == _async_resumable_media.PERMANENT_REDIRECT
    # Corrupt the upload state, then restore it via ``upload.recover()``.
    await sabotage_and_recover(upload, stream, authorized_transport,
                               chunk_size)
    # Transmit whatever remains (one chunk was already sent above).
    chunks_sent = await transmit_chunks(
        upload,
        authorized_transport,
        object_name,
        None,
        num_chunks=1,
        content_type=BYTES_CONTENT_TYPE,
    )
    assert chunks_sent == 3
    # Read the blob back and compare against the local bytes.
    await check_content(object_name,
                        stream.getvalue(),
                        authorized_transport,
                        headers=headers)
    # The finished upload must be tombstoned.
    await check_tombstoned(upload, authorized_transport)
Esempio n. 6
0
 async def test_smaller_than_chunk_size(self, authorized_transport, bucket,
                                        cleanup):
     """Upload a file that fits inside a single (partial) chunk."""
     object_name = os.path.basename(ICO_FILE)
     chunk_size = _async_resumable_media.UPLOAD_CHUNK_SIZE
     # Schedule deletion of the blob and confirm it does not exist yet.
     await cleanup(object_name, authorized_transport)
     await check_does_not_exist(authorized_transport, object_name)
     # The fixture file must fit inside one chunk for this scenario.
     size = os.path.getsize(ICO_FILE)
     assert size < chunk_size
     upload = resumable_requests.ResumableUpload(utils.RESUMABLE_UPLOAD,
                                                 chunk_size)
     # Open-ended session: the total size is unknown up front.
     info = {u"name": object_name}
     with open(ICO_FILE, u"rb") as stream:
         response = await upload.initiate(
             authorized_transport,
             stream,
             info,
             ICO_CONTENT_TYPE,
             stream_final=False,
         )
         # ``initiate`` should succeed and leave the stream untouched.
         await check_initiate(response, upload, stream,
                              authorized_transport, info)
         # The total byte count stays unset for an open-ended upload.
         assert upload.total_bytes is None
         # One request carries the whole payload and finalizes the upload.
         response = await upload.transmit_next_chunk(authorized_transport)
         self._check_range_sent(response, 0, size - 1, size)
         await check_response(response, object_name, total_bytes=size)
         # Read the blob back and compare against the local bytes.
         stream.seek(0)
         expected_contents = stream.read()
         await check_content(object_name, expected_contents,
                             authorized_transport)
         # The finished upload must be tombstoned.
         await check_tombstoned(upload, authorized_transport)