def add_get_responses(self):
    """Queue one stubbed ``get_object`` response per multipart chunk.

    Python slicing clamps the stop index to the end of the sequence, so
    ``self.content[i:i + chunksize]`` already returns the short final
    chunk — the original special-cased last-chunk branch was redundant
    and duplicated the ``add_response`` call.
    """
    chunksize = self.config.multipart_chunksize
    for i in range(0, len(self.content), chunksize):
        stream = six.BytesIO(self.content[i:i + chunksize])
        self.stubber.add_response('get_object', {'Body': stream})
def test_iter(self):
    """A chunksize equal to the full length yields the content as one chunk."""
    data = b'my content'
    iterator = DownloadChunkIterator(six.BytesIO(data), len(data))
    self.assertEqual(list(iterator), [b'my content'])
def test_uses_bandwidth_limiter(self):
    """Downloads with ``max_bandwidth`` set should be measurably throttled."""
    self.content = b'a' * 1024 * 1024
    self.stream = six.BytesIO(self.content)
    self.config = TransferConfig(
        max_request_concurrency=1,
        max_bandwidth=len(self.content) / 2)
    self._manager = TransferManager(self.client, self.config)
    self.add_head_object_response()
    self.add_successful_get_object_responses()

    start_time = time.time()
    future = self.manager.download(
        self.bucket, self.key, self.filename, self.extra_args)
    future.result()
    # Smoke test only: with the bandwidth cap at len(content)/2 per second
    # the transfer should take noticeably longer. Ideally over two seconds,
    # but because rate tracking is inaccurate right at the start of a
    # transfer, we only require half of the theoretical minimum time.
    self.assertGreaterEqual(time.time() - start_time, 1)

    # Throttling must not corrupt the downloaded data.
    with open(self.filename, 'rb') as f:
        self.assertEqual(self.content, f.read())
def setUp(self):
    """Wire a stubbed client into ProcessPoolDownloader for each test."""
    # The stubbed client needs to run in a manager to be shared across
    # processes and have it properly consume the stubbed response across
    # processes.
    self.manager = StubbedClientManager()
    self.manager.start()
    self.stubbed_client = self.manager.StubbedClient()
    # Patch the downloader's client factory so every worker process gets
    # the shared stubbed client instead of constructing a real one.
    self.stubbed_client_factory = StubbedClientFactory(self.stubbed_client)
    self.client_factory_patch = mock.patch(
        'ibm_s3transfer.processpool.ClientFactory',
        self.stubbed_client_factory
    )
    self.client_factory_patch.start()
    self.files = FileCreator()
    # A single request process keeps the stubbed responses consumed in a
    # deterministic order.
    self.config = ProcessTransferConfig(
        max_request_processes=1
    )
    self.downloader = ProcessPoolDownloader(config=self.config)
    # Default request parameters and canned object content used by tests.
    self.bucket = 'mybucket'
    self.key = 'mykey'
    self.filename = self.files.full_path('filename')
    self.remote_contents = b'my content'
    self.stream = six.BytesIO(self.remote_contents)
def setUp(self):
    """Build a GetObjectWorker with mocked client plumbing and temp files."""
    super(TestGetObjectWorker, self).setUp()
    self.files = FileCreator()
    self.queue = queue.Queue()
    # The factory mock hands the worker the stubbed client from the base
    # class instead of creating a real one.
    self.client_factory = mock.Mock(ClientFactory)
    self.client_factory.create_client.return_value = self.client
    self.transfer_monitor = TransferMonitor()
    self.osutil = OSUtils()
    self.worker = GetObjectWorker(
        queue=self.queue,
        client_factory=self.client_factory,
        transfer_monitor=self.transfer_monitor,
        osutil=self.osutil
    )
    self.transfer_id = self.transfer_monitor.notify_new_transfer()
    # Default job parameters used by the individual tests.
    self.bucket = 'bucket'
    self.key = 'key'
    self.remote_contents = b'my content'
    self.temp_filename = self.files.create_file('tempfile', '')
    self.extra_args = {}
    self.offset = 0
    self.final_filename = self.files.full_path('final_filename')
    self.stream = six.BytesIO(self.remote_contents)
    # Large job count so the worker does not consider the transfer
    # complete (and rename the temp file) after a single job.
    self.transfer_monitor.notify_expected_jobs_to_complete(
        self.transfer_id, 1000)
def test_iter_chunksize(self):
    """Content longer than the chunksize is split, with a short final chunk."""
    stream = six.BytesIO(b'1234')
    chunks = [chunk for chunk in DownloadChunkIterator(stream, 3)]
    self.assertEqual(chunks, [b'123', b'4'])
def setUp(self):
    """Prepare the kwargs needed to build a download submission task."""
    super(TestDownloadSubmissionTask, self).setUp()
    self.tempdir = tempfile.mkdtemp()
    self.filename = os.path.join(self.tempdir, 'myfile')
    # Default download request parameters.
    self.bucket = 'mybucket'
    self.key = 'mykey'
    self.extra_args = {}
    self.subscribers = []

    # Create a stream to read from
    self.content = b'my content'
    self.stream = six.BytesIO(self.content)

    # A list to keep track of all of the bodies sent over the wire
    # and their order.
    self.call_args = self.get_call_args()
    self.transfer_future = self.get_transfer_future(self.call_args)
    self.io_executor = BoundedExecutor(1000, 1)
    # Keyword arguments handed to the submission task under test.
    self.submission_main_kwargs = {
        'client': self.client,
        'config': self.config,
        'osutil': self.osutil,
        'request_executor': self.executor,
        'io_executor': self.io_executor,
        'transfer_future': self.transfer_future
    }
    self.submission_task = self.get_download_submission_task()
def test_upload_for_seekable_filelike_obj(self):
    """A seekable file-like object can be uploaded directly."""
    self.add_put_object_response_with_default_expected_params()
    fileobj = six.BytesIO(self.content)
    self.manager.upload(
        fileobj, self.bucket, self.key, self.extra_args).result()
    self.assert_expected_client_calls_were_correct()
    self.assert_put_object_body_was_correct()
def test_download_file_ranged_download(self):
    """An object above the multipart threshold is fetched in two ranges."""
    midpoint = len(self.remote_contents) // 2
    self.stubbed_client.add_response(
        'head_object', {'ContentLength': len(self.remote_contents)})
    # One get_object response per expected ranged request.
    for part in (self.remote_contents[:midpoint],
                 self.remote_contents[midpoint:]):
        self.stubbed_client.add_response(
            'get_object', {'Body': six.BytesIO(part)})
    config = ProcessTransferConfig(
        multipart_chunksize=midpoint,
        multipart_threshold=midpoint,
        max_request_processes=1)
    with ProcessPoolDownloader(config=config) as downloader:
        downloader.download_file(self.bucket, self.key, self.filename)
    self.assert_contents(self.filename, self.remote_contents)
def test_upload_for_seekable_filelike_obj_that_has_been_seeked(self):
    """Only bytes after the current seek position should be uploaded."""
    self.add_put_object_response_with_default_expected_params()
    seek_pos = 5
    fileobj = six.BytesIO(self.content)
    fileobj.seek(seek_pos)
    self.manager.upload(
        fileobj, self.bucket, self.key, self.extra_args).result()
    self.assert_expected_client_calls_were_correct()
    self.assertEqual(
        b''.join(self.sent_bodies), self.content[seek_pos:])
def test_download_empty_object(self):
    """Downloading a zero-byte object still creates the destination file."""
    self.content = b''
    self.stream = six.BytesIO(self.content)
    self.add_head_object_response()
    self.add_successful_get_object_responses()

    self.manager.download(
        self.bucket, self.key, self.filename, self.extra_args).result()

    # The file must exist even though nothing was written to it.
    with open(self.filename, 'rb') as f:
        self.assertEqual(f.read(), b'')
def create_stubbed_responses(self):
    """Return the head_object response plus one get_object per chunk."""
    responses = [{
        'method': 'head_object',
        'service_response': {
            'ContentLength': len(self.content)
        }
    }]
    # Same chunk boundaries as the original literal list: [0:4], [4:8], [8:].
    for start, end in ((0, 4), (4, 8), (8, None)):
        responses.append({
            'method': 'get_object',
            'service_response': {
                'Body': six.BytesIO(self.content[start:end])
            }
        })
    return responses
def test_download_for_seekable_filelike_obj(self):
    """Downloads can target a seekable file-like object such as BytesIO."""
    self.add_head_object_response()
    self.add_successful_get_object_responses()

    fileobj = six.BytesIO()
    self.manager.download(
        self.bucket, self.key, fileobj, self.extra_args).result()

    # Rewind and verify every byte made it into the object.
    fileobj.seek(0)
    self.assertEqual(fileobj.read(), self.content)
def setUp(self):
    """Create the fixtures a GetObjectTask needs to run in isolation."""
    super(TestGetObjectTask, self).setUp()
    # Default request parameters.
    self.bucket = 'mybucket'
    self.key = 'mykey'
    self.extra_args = {}
    self.callbacks = []
    self.max_attempts = 5
    self.io_executor = BoundedExecutor(1000, 1)
    # Canned object content and the stream the stubbed client will return.
    self.content = b'my content'
    self.stream = six.BytesIO(self.content)
    # WriteCollector records writes so tests can assert what was written.
    self.fileobj = WriteCollector()
    self.osutil = OSUtils()
    # 64 MB chunk size — larger than the content, so a single read/write.
    self.io_chunksize = 64 * (1024**2)
    self.task_cls = GetObjectTask
    self.download_output_manager = DownloadSeekableOutputManager(
        self.osutil, self.transfer_coordinator, self.io_executor)
def test_download_multiple_files(self):
    """One downloader instance can service several download_file calls."""
    # Queue one get_object body per expected download.
    for body in (self.stream, six.BytesIO(self.remote_contents)):
        self.stubbed_client.add_response('get_object', {'Body': body})
    second_file = self.files.full_path('filename2')
    with self.downloader:
        for destination in (self.filename, second_file):
            self.downloader.download_file(
                self.bucket, self.key, destination,
                expected_size=len(self.remote_contents))
    self.assert_contents(self.filename, self.remote_contents)
    self.assert_contents(second_file, self.remote_contents)
def setUp(self):
    """Shared fixtures for download tests: manager, temp dir, canned content."""
    super(BaseDownloadTest, self).setUp()
    # Single-threaded requests keep stubbed responses consumed in order.
    self.config = TransferConfig(max_request_concurrency=1)
    self._manager = TransferManager(self.client, self.config)

    # Create a temporary directory to write to
    self.tempdir = tempfile.mkdtemp()
    self.filename = os.path.join(self.tempdir, 'myfile')

    # Initialize some default arguments
    self.bucket = 'mybucket'
    self.key = 'mykey'
    self.extra_args = {}
    self.subscribers = []

    # Create a stream to read from
    self.content = b'my content'
    self.stream = six.BytesIO(self.content)
def open_nonseekable(self, filename, mode):
    """Record the open call, then return a non-seekable writer over content."""
    self.open_call_args.append((filename, mode))
    backing = six.BytesIO(self.content)
    return NonSeekableWriter(backing)
def test_is_compatible_bytes_io(self):
    """An in-memory BytesIO object is an acceptable upload input."""
    fileobj = six.BytesIO()
    self.assertTrue(self.upload_input_manager.is_compatible(fileobj))
def test_is_compatible_with_bytesio(self):
    """A BytesIO object is a valid seekable download target."""
    fileobj = six.BytesIO()
    self.assertTrue(
        self.download_output_manager.is_compatible(fileobj, self.osutil))
def test_empty_content(self):
    """An empty body still yields exactly one (empty) chunk."""
    iterator = DownloadChunkIterator(six.BytesIO(b''), 3)
    self.assertEqual([chunk for chunk in iterator], [b''])