Example #1
 def test_attributes(self):
     """A default writer is write-only and picks up the blob's chunk size."""
     fake_blob = mock.Mock()
     fake_blob.chunk_size = 256 * 1024
     writer = BlobWriter(fake_blob)
     self.assertTrue(writer.writable())
     self.assertFalse(writer.readable())
     self.assertFalse(writer.seekable())
     self.assertEqual(writer._chunk_size, 256 * 1024)
Example #2
    def test_num_retries_only(self, mock_warn):
        """Deprecated ``num_retries`` (without ``retry``) is forwarded to the
        resumable upload and emits a single DeprecationWarning.

        Fix: the pre-upload check previously asserted on
        ``blob.initiate_resumable_upload`` (no leading underscore) — an
        attribute a Mock auto-creates and that nothing ever calls, so
        ``assert_not_called()`` could never fail. It now targets
        ``_initiate_resumable_upload``, the method the writer actually uses.
        """
        blob = mock.Mock()

        upload = mock.Mock()
        transport = mock.Mock()

        blob._initiate_resumable_upload.return_value = (upload, transport)

        with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
            # Create a writer.
            # It would be normal to use a context manager here, but not doing so
            # gives us more control over close() for test purposes.
            chunk_size = 8  # Note: Real upload requires a multiple of 256KiB.
            writer = BlobWriter(
                blob,
                chunk_size=chunk_size,
                content_type=PLAIN_CONTENT_TYPE,
                num_retries=2,
            )

        # The transmit_next_chunk method must actually consume bytes from the
        # sliding buffer for the flush() feature to work properly.
        upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
            chunk_size)

        # Write under chunk_size. This should be buffered and the upload not
        # initiated.
        writer.write(TEST_BINARY_DATA[0:4])
        blob._initiate_resumable_upload.assert_not_called()

        # Write over chunk_size. This should result in upload initialization
        # and multiple chunks uploaded.
        writer.write(TEST_BINARY_DATA[4:32])
        blob._initiate_resumable_upload.assert_called_once_with(
            blob.bucket.client,
            writer._buffer,
            PLAIN_CONTENT_TYPE,
            None,  # size
            2,  # num_retries
            chunk_size=chunk_size,
            retry=None,
        )
        upload.transmit_next_chunk.assert_called_with(transport)
        self.assertEqual(upload.transmit_next_chunk.call_count, 4)
        mock_warn.assert_called_once_with(_NUM_RETRIES_MESSAGE,
                                          DeprecationWarning,
                                          stacklevel=2)

        # Write another byte, finalize and close.
        writer.write(TEST_BINARY_DATA[32:33])
        writer.close()
        self.assertEqual(upload.transmit_next_chunk.call_count, 5)
Example #3
    def test_write(self):
        """Text-mode writes through a TextIOWrapper buffer below chunk_size
        and initiate a resumable upload once enough bytes accumulate.

        Fix: the pre-upload check previously asserted on
        ``blob.initiate_resumable_upload`` (no leading underscore) — an
        attribute a Mock auto-creates and that nothing ever calls, so
        ``assert_not_called()`` could never fail. It now targets
        ``_initiate_resumable_upload``, the method the writer actually uses.
        """
        blob = mock.Mock()

        upload = mock.Mock()
        transport = mock.Mock()

        blob._initiate_resumable_upload.return_value = (upload, transport)

        with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
            # Create a writer in text mode.
            # It would be normal to use a context manager here, but not doing so
            # gives us more control over close() for test purposes.
            chunk_size = 8  # Note: Real upload requires a multiple of 256KiB.
            unwrapped_writer = BlobWriter(
                blob,
                chunk_size=chunk_size,
                text_mode=True,
                num_retries=NUM_RETRIES,
                content_type=PLAIN_CONTENT_TYPE,
            )

        writer = io.TextIOWrapper(unwrapped_writer)

        # The transmit_next_chunk method must actually consume bytes from the
        # sliding buffer for the flush() feature to work properly.
        upload.transmit_next_chunk.side_effect = lambda _: unwrapped_writer._buffer.read(
            chunk_size
        )

        # Write under chunk_size. This should be buffered and the upload not
        # initiated.
        writer.write(TEST_MULTIBYTE_TEXT_DATA[0:2])
        blob._initiate_resumable_upload.assert_not_called()

        # Write all data and close.
        writer.write(TEST_MULTIBYTE_TEXT_DATA[2:])
        writer.close()

        blob._initiate_resumable_upload.assert_called_once_with(
            blob.bucket.client,
            unwrapped_writer._buffer,
            PLAIN_CONTENT_TYPE,
            None,
            NUM_RETRIES,
            chunk_size=chunk_size,
        )
        upload.transmit_next_chunk.assert_called_with(transport)
Example #4
    def test_num_retries_and_retry_conflict(self):
        """Supplying both ``num_retries`` and ``retry`` surfaces the
        ValueError raised when the upload is initiated.

        Fix: the pre-upload check previously asserted on
        ``blob.initiate_resumable_upload`` (no leading underscore) — an
        attribute a Mock auto-creates and that nothing ever calls, so
        ``assert_not_called()`` could never fail. It now targets
        ``_initiate_resumable_upload``, the method the writer actually uses.
        """
        blob = mock.Mock()

        # Simulate the real conflict behavior: initiating the upload with
        # both options raises ValueError.
        blob._initiate_resumable_upload.side_effect = ValueError

        with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
            # Create a writer.
            # It would be normal to use a context manager here, but not doing so
            # gives us more control over close() for test purposes.
            chunk_size = 8  # Note: Real upload requires a multiple of 256KiB.
            writer = BlobWriter(
                blob,
                chunk_size=chunk_size,
                content_type=PLAIN_CONTENT_TYPE,
                num_retries=2,
                retry=DEFAULT_RETRY,
            )

        # Write under chunk_size. This should be buffered and the upload not
        # initiated.
        writer.write(TEST_BINARY_DATA[0:4])
        blob._initiate_resumable_upload.assert_not_called()

        # Write over chunk_size. The mock will raise a ValueError, simulating
        # actual behavior when num_retries and retry are both specified.
        with self.assertRaises(ValueError):
            writer.write(TEST_BINARY_DATA[4:32])

        blob._initiate_resumable_upload.assert_called_once_with(
            blob.bucket.client,
            writer._buffer,
            PLAIN_CONTENT_TYPE,
            None,  # size
            2,  # num_retries
            chunk_size=chunk_size,
            retry=DEFAULT_RETRY,
        )
Example #5
 def test_rejects_invalid_kwargs(self):
     """An unrecognized keyword argument to the constructor raises ValueError."""
     fake_blob = mock.Mock()
     with self.assertRaises(ValueError):
         BlobWriter(fake_blob, invalid_kwarg=1)
Example #6
    def test_seek_fails(self):
        """The writer is not seekable: seek() raises UnsupportedOperation."""
        writer = BlobWriter(mock.Mock(chunk_size=None))

        with self.assertRaises(io.UnsupportedOperation):
            writer.seek()
Example #7
    def test_write(self):
        """Binary writes buffer below chunk_size, then initiate a resumable
        upload with the configured arguments and stream chunks.

        Fix: the pre-upload check previously asserted on
        ``blob.initiate_resumable_upload`` (no leading underscore) — an
        attribute a Mock auto-creates and that nothing ever calls, so
        ``assert_not_called()`` could never fail. It now targets
        ``_initiate_resumable_upload``, the method the writer actually uses.
        """
        blob = mock.Mock()

        upload = mock.Mock()
        transport = mock.Mock()

        blob._initiate_resumable_upload.return_value = (upload, transport)

        with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
            # Create a writer with (arbitrary) arguments so we can validate the
            # arguments are used.
            # It would be normal to use a context manager here, but not doing so
            # gives us more control over close() for test purposes.
            upload_kwargs = {"if_metageneration_match": 1}
            chunk_size = 8  # Note: Real upload requires a multiple of 256KiB.
            writer = BlobWriter(
                blob,
                chunk_size=chunk_size,
                num_retries=NUM_RETRIES,
                content_type=PLAIN_CONTENT_TYPE,
                **upload_kwargs
            )

        # The transmit_next_chunk method must actually consume bytes from the
        # sliding buffer for the flush() feature to work properly.
        upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
            chunk_size
        )

        # Write under chunk_size. This should be buffered and the upload not
        # initiated.
        writer.write(TEST_BINARY_DATA[0:4])
        blob._initiate_resumable_upload.assert_not_called()

        # Write over chunk_size. This should result in upload initialization
        # and multiple chunks uploaded.
        writer.write(TEST_BINARY_DATA[4:32])
        blob._initiate_resumable_upload.assert_called_once_with(
            blob.bucket.client,
            writer._buffer,
            PLAIN_CONTENT_TYPE,
            None,
            NUM_RETRIES,
            chunk_size=chunk_size,
            **upload_kwargs
        )
        upload.transmit_next_chunk.assert_called_with(transport)
        self.assertEqual(upload.transmit_next_chunk.call_count, 4)

        # Write another byte, finalize and close.
        writer.write(TEST_BINARY_DATA[32:33])
        self.assertEqual(writer.tell(), 33)
        writer.close()
        self.assertEqual(upload.transmit_next_chunk.call_count, 5)
Example #8
 def test_reject_wrong_chunk_size(self):
     """Constructing a writer over a blob with an invalid chunk_size raises."""
     fake_blob = mock.Mock()
     fake_blob.chunk_size = 123  # presumably not a valid chunk-size multiple
     with self.assertRaises(ValueError):
         BlobWriter(fake_blob)
Example #9
    def _make_blob_writer(*args, **kwargs):
        """Factory helper: build a ``BlobWriter``, importing it lazily."""
        from google.cloud.storage import fileio

        return fileio.BlobWriter(*args, **kwargs)
Example #10
 def test_attributes_explicit(self):
     """Explicit chunk_size and retry arguments override the blob's values."""
     fake_blob = mock.Mock()
     fake_blob.chunk_size = 256 * 1024
     writer = BlobWriter(fake_blob, chunk_size=512 * 1024, retry=DEFAULT_RETRY)
     self.assertEqual(512 * 1024, writer._chunk_size)
     self.assertEqual(DEFAULT_RETRY, writer._retry)