def test_write(self):
    """Verify BlobWriter buffers small writes, lazily initiates the resumable
    upload once buffered bytes exceed chunk_size, transmits chunks, and
    finalizes the upload on close().
    """
    blob = mock.Mock()
    upload = mock.Mock()
    transport = mock.Mock()
    blob._initiate_resumable_upload.return_value = (upload, transport)

    with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
        # Create a writer with (arbitrary) arguments so we can validate the
        # arguments are used.
        # It would be normal to use a context manager here, but not doing so
        # gives us more control over close() for test purposes.
        upload_kwargs = {"if_metageneration_match": 1}
        chunk_size = 8  # Note: Real upload requires a multiple of 256KiB.
        writer = BlobWriter(
            blob,
            chunk_size=chunk_size,
            num_retries=NUM_RETRIES,
            content_type=PLAIN_CONTENT_TYPE,
            **upload_kwargs
        )

        # The transmit_next_chunk method must actually consume bytes from the
        # sliding buffer for the flush() feature to work properly.
        upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
            chunk_size
        )

        # Write under chunk_size. This should be buffered and the upload not
        # initiated.
        writer.write(TEST_BINARY_DATA[0:4])
        # FIX: assert on the private method stubbed above. The original
        # asserted on the public name `initiate_resumable_upload`, which a
        # Mock auto-creates as a fresh, never-called child attribute — so
        # the assertion passed vacuously and could not catch a premature
        # upload initiation.
        blob._initiate_resumable_upload.assert_not_called()

        # Write over chunk_size. This should result in upload initialization
        # and multiple chunks uploaded.
        writer.write(TEST_BINARY_DATA[4:32])
        blob._initiate_resumable_upload.assert_called_once_with(
            blob.bucket.client,
            writer._buffer,
            PLAIN_CONTENT_TYPE,
            None,
            NUM_RETRIES,
            chunk_size=chunk_size,
            **upload_kwargs
        )
        upload.transmit_next_chunk.assert_called_with(transport)
        self.assertEqual(upload.transmit_next_chunk.call_count, 4)

        # Write another byte, finalize and close.
        writer.write(TEST_BINARY_DATA[32:33])
        self.assertEqual(writer.tell(), 33)
        writer.close()
        self.assertEqual(upload.transmit_next_chunk.call_count, 5)
def test_num_retries_only(self, mock_warn):
    """Verify that passing only num_retries (without retry) still drives the
    resumable upload correctly and emits the deprecation warning.
    """
    blob = mock.Mock()
    upload = mock.Mock()
    transport = mock.Mock()
    blob._initiate_resumable_upload.return_value = (upload, transport)

    with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
        # Create a writer.
        # It would be normal to use a context manager here, but not doing so
        # gives us more control over close() for test purposes.
        chunk_size = 8  # Note: Real upload requires a multiple of 256KiB.
        writer = BlobWriter(
            blob,
            chunk_size=chunk_size,
            content_type=PLAIN_CONTENT_TYPE,
            num_retries=2,
        )

        # The transmit_next_chunk method must actually consume bytes from the
        # sliding buffer for the flush() feature to work properly.
        upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
            chunk_size
        )

        # Write under chunk_size. This should be buffered and the upload not
        # initiated.
        writer.write(TEST_BINARY_DATA[0:4])
        # FIX: assert on the private method stubbed above. The original
        # asserted on the public name `initiate_resumable_upload`, which a
        # Mock auto-creates as a fresh, never-called child attribute — so
        # the assertion passed vacuously and could not catch a premature
        # upload initiation.
        blob._initiate_resumable_upload.assert_not_called()

        # Write over chunk_size. This should result in upload initialization
        # and multiple chunks uploaded.
        writer.write(TEST_BINARY_DATA[4:32])
        blob._initiate_resumable_upload.assert_called_once_with(
            blob.bucket.client,
            writer._buffer,
            PLAIN_CONTENT_TYPE,
            None,  # size
            2,  # num_retries
            chunk_size=chunk_size,
            retry=None,
        )
        upload.transmit_next_chunk.assert_called_with(transport)
        self.assertEqual(upload.transmit_next_chunk.call_count, 4)
        mock_warn.assert_called_once_with(
            _NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2
        )

        # Write another byte, finalize and close.
        writer.write(TEST_BINARY_DATA[32:33])
        writer.close()
        self.assertEqual(upload.transmit_next_chunk.call_count, 5)