def setUp(self):
    """Build the filename-based input manager and a transfer future for it."""
    super(TestUploadFilenameInputManager, self).setUp()
    # Manager under test, wired to the shared OS utils and coordinator
    # created by the base-class setUp.
    self.upload_input_manager = UploadFilenameInputManager(
        self.osutil, self.transfer_coordinator)
    # The future wraps the temp file path prepared by the base class.
    self.call_args = CallArgs(
        fileobj=self.filename, subscribers=self.subscribers)
    self.future = self.get_transfer_future(self.call_args)
def setUp(self):
    """Set up an UploadFilenameInputManager plus the future it will serve."""
    super(TestUploadFilenameInputManager, self).setUp()
    # Input manager driven by the fixtures from the parent setUp.
    self.upload_input_manager = UploadFilenameInputManager(
        self.osutil, self.transfer_coordinator)
    # Transfer future built around the temp filename fixture.
    self.call_args = CallArgs(
        fileobj=self.filename, subscribers=self.subscribers)
    self.future = self.get_transfer_future(self.call_args)
class TestUploadFilenameInputManager(BaseUploadInputManagerTest):
    """Tests for UploadFilenameInputManager against a filename fileobj."""

    def setUp(self):
        super(TestUploadFilenameInputManager, self).setUp()
        # Manager under test, sharing the base class's osutil/coordinator.
        self.upload_input_manager = UploadFilenameInputManager(
            self.osutil, self.transfer_coordinator)
        # Future built around the temp file prepared by the base setUp.
        self.call_args = CallArgs(
            fileobj=self.filename, subscribers=self.subscribers)
        self.future = self.get_transfer_future(self.call_args)

    def test_is_compatible(self):
        fileobj = self.future.meta.call_args.fileobj
        self.assertTrue(self.upload_input_manager.is_compatible(fileobj))

    def test_stores_bodies_in_memory_put_object(self):
        in_memory = self.upload_input_manager.stores_body_in_memory(
            'put_object')
        self.assertFalse(in_memory)

    def test_stores_bodies_in_memory_upload_part(self):
        in_memory = self.upload_input_manager.stores_body_in_memory(
            'upload_part')
        self.assertFalse(in_memory)

    def test_provide_transfer_size(self):
        self.upload_input_manager.provide_transfer_size(self.future)
        # The reported size must equal the length of the file's contents.
        self.assertEqual(self.future.meta.size, len(self.content))

    def test_requires_multipart_upload(self):
        self.future.meta.provide_transfer_size(len(self.content))
        # The default threshold exceeds the content length, so a
        # multipart upload is not required.
        self.assertFalse(
            self.upload_input_manager.requires_multipart_upload(
                self.future, self.config))
        # Shrinking the threshold down to the content length should flip
        # the decision to multipart.
        self.config.multipart_threshold = len(self.content)
        self.assertTrue(
            self.upload_input_manager.requires_multipart_upload(
                self.future, self.config))

    def test_get_put_object_body(self):
        self.future.meta.provide_transfer_size(len(self.content))
        chunk = self.upload_input_manager.get_put_object_body(self.future)
        chunk.enable_callback()
        # The returned file-like object should read back the file contents.
        with chunk:
            self.assertEqual(chunk.read(), self.content)
        # The on_queued callbacks wrapped around it must have observed
        # every byte transferred.
        self.assertEqual(
            self.recording_subscriber.calculate_bytes_seen(),
            len(self.content))

    def test_get_put_object_body_is_interruptable(self):
        self.future.meta.provide_transfer_size(len(self.content))
        chunk = self.upload_input_manager.get_put_object_body(self.future)
        # Inject an error into the transfer coordinator...
        self.transfer_coordinator.set_exception(InterruptionError)
        # ...and confirm the chunk surfaces it when read.
        with self.assertRaises(InterruptionError):
            chunk.read()

    def test_yield_upload_part_bodies(self):
        # Use a tiny chunk size so multiple parts get produced.
        self.config.multipart_chunksize = 4
        self.future.meta.provide_transfer_size(len(self.content))
        # Iterator of (part_number, body) pairs covering the whole file.
        part_iterator = self.upload_input_manager.yield_upload_part_bodies(
            self.future, self.config.multipart_chunksize)
        for expected, (part_number, chunk) in enumerate(part_iterator, 1):
            # Part numbers are one-based and sequential.
            self.assertEqual(part_number, expected)
            chunk.enable_callback()
            # Each body must match the expected slice for that part.
            with chunk:
                self.assertEqual(
                    chunk.read(),
                    self._get_expected_body_for_part(part_number))
        # Across all parts, the on_queued callbacks should account for
        # every byte of the file.
        self.assertEqual(
            self.recording_subscriber.calculate_bytes_seen(),
            len(self.content))

    def test_yield_upload_part_bodies_are_interruptable(self):
        # Use a tiny chunk size so multiple parts get produced.
        self.config.multipart_chunksize = 4
        self.future.meta.provide_transfer_size(len(self.content))
        part_iterator = self.upload_input_manager.yield_upload_part_bodies(
            self.future, self.config.multipart_chunksize)
        # Inject an error into the transfer coordinator.
        self.transfer_coordinator.set_exception(InterruptionError)
        # Every yielded body should raise that error on read.
        for _, chunk in part_iterator:
            with self.assertRaises(InterruptionError):
                chunk.read()
class TestUploadFilenameInputManager(BaseUploadInputManagerTest):
    """Exercises UploadFilenameInputManager when given a filename input."""

    def setUp(self):
        super(TestUploadFilenameInputManager, self).setUp()
        # Input manager under test, using the inherited fixtures.
        self.upload_input_manager = UploadFilenameInputManager(
            self.osutil, self.transfer_coordinator)
        # Transfer future wrapping the base class's temp filename.
        self.call_args = CallArgs(
            fileobj=self.filename, subscribers=self.subscribers)
        self.future = self.get_transfer_future(self.call_args)

    def test_is_compatible(self):
        self.assertTrue(
            self.upload_input_manager.is_compatible(
                self.future.meta.call_args.fileobj))

    def test_stores_bodies_in_memory_put_object(self):
        # Filename-backed uploads stream from disk rather than memory.
        self.assertFalse(
            self.upload_input_manager.stores_body_in_memory('put_object'))

    def test_stores_bodies_in_memory_upload_part(self):
        # Same for individual multipart parts.
        self.assertFalse(
            self.upload_input_manager.stores_body_in_memory('upload_part'))

    def test_provide_transfer_size(self):
        self.upload_input_manager.provide_transfer_size(self.future)
        # Provided size equals the byte length of the file contents.
        self.assertEqual(self.future.meta.size, len(self.content))

    def test_requires_multipart_upload(self):
        content_length = len(self.content)
        self.future.meta.provide_transfer_size(content_length)
        # Content is below the default threshold: single-part upload.
        needs_multipart = self.upload_input_manager.requires_multipart_upload(
            self.future, self.config)
        self.assertFalse(needs_multipart)
        # Drop the threshold to the content length: multipart is required.
        self.config.multipart_threshold = content_length
        needs_multipart = self.upload_input_manager.requires_multipart_upload(
            self.future, self.config)
        self.assertTrue(needs_multipart)

    def test_get_put_object_body(self):
        self.future.meta.provide_transfer_size(len(self.content))
        body = self.upload_input_manager.get_put_object_body(self.future)
        body.enable_callback()
        # Reading the body yields exactly the file's contents.
        with body:
            self.assertEqual(body.read(), self.content)
        # The wrapped on_queued callbacks tracked the full byte count.
        self.assertEqual(
            self.recording_subscriber.calculate_bytes_seen(),
            len(self.content))

    def test_get_put_object_body_is_interruptable(self):
        self.future.meta.provide_transfer_size(len(self.content))
        body = self.upload_input_manager.get_put_object_body(self.future)
        # Seed the coordinator with an error and verify reads propagate it.
        self.transfer_coordinator.set_exception(InterruptionError)
        with self.assertRaises(InterruptionError):
            body.read()

    def test_yield_upload_part_bodies(self):
        # A small chunk size forces the content to span several parts.
        self.config.multipart_chunksize = 4
        self.future.meta.provide_transfer_size(len(self.content))
        bodies = self.upload_input_manager.yield_upload_part_bodies(
            self.future, self.config.multipart_chunksize)
        next_part = 1
        for part_number, body in bodies:
            # Parts arrive in ascending one-based order.
            self.assertEqual(part_number, next_part)
            body.enable_callback()
            # Each part's body matches the expected slice of the file.
            with body:
                self.assertEqual(
                    body.read(),
                    self._get_expected_body_for_part(part_number))
            next_part += 1
        # All parts together should report every byte via the callbacks.
        self.assertEqual(
            self.recording_subscriber.calculate_bytes_seen(),
            len(self.content))

    def test_yield_upload_part_bodies_are_interruptable(self):
        # A small chunk size forces the content to span several parts.
        self.config.multipart_chunksize = 4
        self.future.meta.provide_transfer_size(len(self.content))
        bodies = self.upload_input_manager.yield_upload_part_bodies(
            self.future, self.config.multipart_chunksize)
        # Seed the coordinator with an error; every part body must raise
        # it when read.
        self.transfer_coordinator.set_exception(InterruptionError)
        for _, body in bodies:
            with self.assertRaises(InterruptionError):
                body.read()