def test_cntrl_c_in_context_manager_cancels_incomplete_transfers(self):
        # The purpose of this test is to make sure that if an error is
        # raised in the body of the context manager, incomplete transfers
        # will be cancelled, with the value of the exception wrapped in a
        # CancelledError.

        # NOTE: The fact that delete() was chosen to test this is arbitrary
        # other than it is the easiest to set up for the stubber.
        # The specific operation is not important to the purpose of this test.
        num_transfers = 100
        futures = []

        for _ in range(num_transfers):
            self.stubber.add_response('delete_object', {})

        manager = TransferManager(
            self.client,
            TransferConfig(max_request_concurrency=1,
                           max_submission_concurrency=1))
        try:
            with manager:
                for i in range(num_transfers):
                    futures.append(manager.delete('mybucket', 'mykey'))
                raise KeyboardInterrupt()
        except KeyboardInterrupt:
            # At least one of the submitted futures should have been
            # cancelled.
            with self.assertRaisesRegexp(CancelledError,
                                         'KeyboardInterrupt()'):
                for future in futures:
                    future.result()
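# A usage sketch of the behavior verified above: consuming futures after a
# cancellation. The CancelledError import path here is an assumption that
# mirrors upstream s3transfer (ibm_s3transfer.exceptions.CancelledError).
from ibm_s3transfer.exceptions import CancelledError

def collect_results(futures):
    # Futures cancelled by an exception raised in the context manager body
    # raise a CancelledError wrapping the original exception's message.
    results = []
    for future in futures:
        try:
            results.append(future.result())
        except CancelledError as e:
            print('Transfer cancelled: %s' % e)
    return results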
 def test_enable_disable_callbacks_only_ever_registered_once(self):
     body = SignalTransferringBody()
     request = create_request_object({
         'method': 'PUT',
         'url': 'https://s3.amazonaws.com',
         'body': body,
         'headers': {},
         'context': {}
     })
     # Create two TransferManager's using the same client
     TransferManager(self.client)
     TransferManager(self.client)
     self.client.meta.events.emit('request-created.s3',
                                  request=request,
                                  operation_name='PutObject')
     # The client should only have the enable/disable callback
     # handlers registered once despite being used for two different
     # TransferManagers.
     self.assertEqual(
         body.signal_transferring_call_count, 1,
         'The enable_callback() should have only ever been registered once')
     self.assertEqual(
         body.signal_not_transferring_call_count, 1,
         'The disable_callback() should have only ever been registered '
         'once')
    def test_uses_bandwidth_limiter(self):
        self.content = b'a' * 1024 * 1024
        self.stream = six.BytesIO(self.content)
        self.config = TransferConfig(max_request_concurrency=1,
                                     max_bandwidth=len(self.content) / 2)
        self._manager = TransferManager(self.client, self.config)

        self.add_head_object_response()
        self.add_successful_get_object_responses()

        start = time.time()
        future = self.manager.download(self.bucket, self.key, self.filename,
                                       self.extra_args)
        future.result()
        # This is just a smoke test to make sure that the limiter is
        # being used, not to verify its exactness. So we set the maximum
        # bandwidth to len(content)/2 per second and make sure that the
        # transfer is noticeably slower. Ideally it would take more than
        # two seconds, but since bandwidth tracking is not entirely
        # accurate at the initial start of a transfer, we give ourselves
        # some flexibility by setting the expected time to half of the
        # theoretical time it should take.
        self.assertGreaterEqual(time.time() - start, 1)

        # Ensure that the contents are correct
        with open(self.filename, 'rb') as f:
            self.assertEqual(self.content, f.read())
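# A minimal usage sketch (not part of the test above) of throttling real
# transfers with max_bandwidth. Assumes `client` is an ibm_boto3 S3 client
# and that TransferConfig/TransferManager live in ibm_s3transfer.manager,
# as they do in upstream s3transfer.
from ibm_s3transfer.manager import TransferConfig, TransferManager

config = TransferConfig(max_bandwidth=1024 * 1024)  # cap at ~1 MB/s
with TransferManager(client, config) as manager:
    manager.download('mybucket', 'mykey', '/tmp/myfile').result()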
Example #4
    def test_limits_in_memory_chunks_for_fileobj(self):
        # Limit the maximum in memory chunks to one but make the number of
        # threads more than one. This means that the upload will have to
        # happen sequentially despite having many threads available because
        # data is sequentially partitioned into chunks in memory, and since
        # there can only ever be one in memory chunk, each upload part will
        # have to happen one at a time.
        self.config.max_request_concurrency = 10
        self.config.max_in_memory_upload_chunks = 1
        self._manager = TransferManager(self.client, self.config)

        # Add some default stubbed responses.
        # These responses are added in order of part number, so if the
        # multipart upload is not done sequentially, which it should be
        # because we limit the in memory upload chunks to one, the stubber
        # will raise exceptions for mismatching partNumber parameters once
        # the upload() method is called on the transfer manager.
        # If there is a mismatch, the stubber error will propagate on
        # the future.result()
        self.add_create_multipart_response_with_default_expected_params()
        self.add_upload_part_responses_with_default_expected_params()
        self.add_complete_multipart_response_with_default_expected_params()
        with open(self.filename, 'rb') as f:
            future = self.manager.upload(f, self.bucket, self.key,
                                         self.extra_args)
            future.result()

        # Make sure that the stubber had all of its stubbed responses consumed.
        self.assert_expected_client_calls_were_correct()
        # Ensure the contents were uploaded in sequential order by checking
        # that the sent contents were in order.
        self.assert_upload_part_bodies_were_correct()
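# A sketch of the memory-bounding knob exercised above: with
# max_in_memory_upload_chunks=1, parts are staged into memory one at a
# time, so uploads proceed sequentially even with ten worker threads.
# Assumes `client` is an ibm_boto3 S3 client.
from ibm_s3transfer.manager import TransferConfig, TransferManager

config = TransferConfig(
    max_request_concurrency=10,
    max_in_memory_upload_chunks=1)
with TransferManager(client, config) as manager:
    with open('bigfile', 'rb') as f:
        manager.upload(f, 'mybucket', 'mykey').result()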
Example #5
 def setUp(self):
     super(TestMultipartUpload, self).setUp()
     self.chunksize = 4
     self.config = TransferConfig(max_request_concurrency=1,
                                  multipart_threshold=1,
                                  multipart_chunksize=self.chunksize)
     self._manager = TransferManager(self.client, self.config)
     self.multipart_id = 'my-upload-id'
Example #6
    def test_uses_provided_osutil(self):
        osutil = RecordingOSUtils()
        # Use the recording os utility for the transfer manager
        self._manager = TransferManager(self.client, self.config, osutil)

        self.add_put_object_response_with_default_expected_params()

        future = self.manager.upload(self.filename, self.bucket, self.key)
        future.result()

        # The upload should have used the os utility. We check this by making
        # sure that the recorded opens are as expected.
        expected_opens = [(self.filename, 'rb')]
        self.assertEqual(osutil.open_records, expected_opens)
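# RecordingOSUtils is a test helper whose implementation is not shown here.
# Below is an illustrative sketch of what such a recording os utility could
# look like, built on ibm_s3transfer.utils.OSUtils; the class name and
# details are assumptions, not the actual helper.
from ibm_s3transfer.utils import OSUtils

class SketchRecordingOSUtils(OSUtils):
    def __init__(self):
        super(SketchRecordingOSUtils, self).__init__()
        self.open_records = []
        self.rename_records = []

    def open(self, filename, mode):
        # Record every file open so tests can assert on (filename, mode).
        self.open_records.append((filename, mode))
        return super(SketchRecordingOSUtils, self).open(filename, mode)

    def rename_file(self, current_filename, new_filename):
        # Record renames, e.g. a temporary download file moving into place.
        self.rename_records.append((current_filename, new_filename))
        super(SketchRecordingOSUtils, self).rename_file(
            current_filename, new_filename)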
    def test_uses_provided_osutil(self):
        osutil = RecordingOSUtils()
        # Use the recording os utility for the transfer manager
        self._manager = TransferManager(self.client, self.config, osutil)

        self.add_head_object_response()
        self.add_successful_get_object_responses()

        future = self.manager.download(**self.create_call_kwargs())
        future.result()
        # The osutil should have had its open() method invoked when opening
        # a temporary file and its rename_file() method invoked when the
        # temporary file was moved to its final location.
        self.assertEqual(len(osutil.open_records), 1)
        self.assertEqual(len(osutil.rename_records), 1)
    def test_retry_failure(self):
        self.add_head_object_response()

        max_retries = 3
        self.config.num_download_attempts = max_retries
        self._manager = TransferManager(self.client, self.config)
        # Add responses that fill up the maximum number of retries.
        self.add_n_retryable_get_object_responses(max_retries)

        future = self.manager.download(**self.create_call_kwargs())

        # A retry exceeded error should have happened.
        with self.assertRaises(RetriesExceededError):
            future.result()

        # All of the retries should have been used up.
        self.stubber.assert_no_pending_responses()
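# A usage sketch of bounding download retries as this test does. The
# RetriesExceededError import path is an assumption mirroring upstream
# s3transfer; `client` stands in for an ibm_boto3 S3 client.
from ibm_s3transfer.exceptions import RetriesExceededError
from ibm_s3transfer.manager import TransferConfig, TransferManager

config = TransferConfig(num_download_attempts=3)
with TransferManager(client, config) as manager:
    try:
        manager.download('mybucket', 'mykey', '/tmp/myfile').result()
    except RetriesExceededError as e:
        print('Download failed after 3 attempts: %s' % e)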
Example #9
    def setUp(self):
        super(BaseCopyTest, self).setUp()
        self.config = TransferConfig(max_request_concurrency=1,
                                     multipart_chunksize=MIN_UPLOAD_CHUNKSIZE,
                                     multipart_threshold=MIN_UPLOAD_CHUNKSIZE *
                                     4)
        self._manager = TransferManager(self.client, self.config)

        # Initialize some default arguments
        self.bucket = 'mybucket'
        self.key = 'mykey'
        self.copy_source = {'Bucket': 'mysourcebucket', 'Key': 'mysourcekey'}
        self.extra_args = {}
        self.subscribers = []

        self.half_chunksize = int(MIN_UPLOAD_CHUNKSIZE / 2)
        self.content = b'0' * (2 * MIN_UPLOAD_CHUNKSIZE + self.half_chunksize)
    def setUp(self):
        super(BaseDownloadTest, self).setUp()
        self.config = TransferConfig(max_request_concurrency=1)
        self._manager = TransferManager(self.client, self.config)

        # Create a temporary directory to write to
        self.tempdir = tempfile.mkdtemp()
        self.filename = os.path.join(self.tempdir, 'myfile')

        # Initialize some default arguments
        self.bucket = 'mybucket'
        self.key = 'mykey'
        self.extra_args = {}
        self.subscribers = []

        # Create a stream to read from
        self.content = b'my content'
        self.stream = six.BytesIO(self.content)
Example #11
def create_transfer_manager(client, config, osutil=None):
    """Creates a transfer manager based on configuration

    :type client: ibm_boto3.client
    :param client: The S3 client to use

    :type config: ibm_boto3.s3.transfer.TransferConfig
    :param config: The transfer config to use

    :type osutil: ibm_s3transfer.utils.OSUtils
    :param osutil: The os utility to use

    :rtype: ibm_s3transfer.manager.TransferManager
    :returns: A transfer manager based on parameters provided
    """
    executor_cls = None
    if not config.use_threads:
        executor_cls = NonThreadedExecutor
    return TransferManager(client, config, osutil, executor_cls)
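# Example use of the factory above; assumes `client` is an S3 client and
# that TransferConfig is ibm_boto3.s3.transfer.TransferConfig (which
# exposes use_threads). With use_threads=False, all work runs in the
# calling thread via NonThreadedExecutor.
config = TransferConfig(use_threads=False)
manager = create_transfer_manager(client, config)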
Example #12
    def test_sigv4_progress_callbacks_invoked_once(self):
        # Reset the client and manager to use sigv4
        self.reset_stubber_with_new_client(
            {'config': Config(signature_version='s3v4')})
        self.client.meta.events.register('before-parameter-build.s3.*',
                                         self.collect_body)
        self._manager = TransferManager(self.client, self.config)

        # Add the stubbed response.
        self.add_put_object_response_with_default_expected_params()

        subscriber = RecordingSubscriber()
        future = self.manager.upload(self.filename,
                                     self.bucket,
                                     self.key,
                                     subscribers=[subscriber])
        future.result()
        self.assert_expected_client_calls_were_correct()

        # The amount of bytes seen should be the same as the file size
        self.assertEqual(subscriber.calculate_bytes_seen(), len(self.content))
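# RecordingSubscriber is a test helper; a minimal progress subscriber along
# the same lines could look like this. The BaseSubscriber import path is an
# assumption mirroring upstream s3transfer (ibm_s3transfer.subscribers).
from ibm_s3transfer.subscribers import BaseSubscriber

class ProgressPrinter(BaseSubscriber):
    def on_progress(self, future, bytes_transferred, **kwargs):
        # Invoked as bytes go over the wire; summing bytes_transferred
        # across calls yields the total seen, as the assertion above does.
        print('transferred %d bytes' % bytes_transferred)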
Example #13
    def setUp(self):
        super(BaseUploadTest, self).setUp()
        # TODO: We do not want to use the real MIN_UPLOAD_CHUNKSIZE
        # when we're adjusting parts.
        # This is really wasteful and fails CI builds because self.content
        # would normally use 10MB+ of memory.
        # Until there's an API to configure this, we're patching this with
        # a min size of 1.  We can't patch MIN_UPLOAD_CHUNKSIZE directly
        # because it's already bound to a default value in the
        # chunksize adjuster.  Instead we need to patch out the
        # chunksize adjuster class.
        self.adjuster_patch = mock.patch(
            'ibm_s3transfer.upload.ChunksizeAdjuster',
            lambda: ChunksizeAdjuster(min_size=1))
        self.adjuster_patch.start()
        self.config = TransferConfig(max_request_concurrency=1)
        self._manager = TransferManager(self.client, self.config)

        # Create a temporary directory with files to read from
        self.tempdir = tempfile.mkdtemp()
        self.filename = os.path.join(self.tempdir, 'myfile')
        self.content = b'my content'

        with open(self.filename, 'wb') as f:
            f.write(self.content)

        # Initialize some default arguments
        self.bucket = 'mybucket'
        self.key = 'mykey'
        self.extra_args = {}
        self.subscribers = []

        # A list to keep track of all of the bodies sent over the wire
        # and their order.
        self.sent_bodies = []
        self.client.meta.events.register('before-parameter-build.s3.*',
                                         self.collect_body)
Example #14
    def test_upload_with_bandwidth_limiter(self):
        self.content = b'a' * 1024 * 1024
        with open(self.filename, 'wb') as f:
            f.write(self.content)
        self.config = TransferConfig(max_request_concurrency=1,
                                     max_bandwidth=len(self.content) / 2)
        self._manager = TransferManager(self.client, self.config)

        self.add_put_object_response_with_default_expected_params()
        start = time.time()
        future = self.manager.upload(self.filename, self.bucket, self.key)
        future.result()
        # This is just a smoke test to make sure that the limiter is
        # being used, not to verify its exactness. So we set the maximum
        # bandwidth to len(content)/2 per second and make sure that the
        # transfer is noticeably slower. Ideally it would take more than
        # two seconds, but since bandwidth tracking is not entirely
        # accurate at the initial start of a transfer, we give ourselves
        # some flexibility by setting the expected time to half of the
        # theoretical time it should take.
        self.assertGreaterEqual(time.time() - start, 1)

        self.assert_expected_client_calls_were_correct()
        self.assert_put_object_body_was_correct()
 def test_use_custom_executor_implementation(self):
     mocked_executor_cls = mock.Mock(BaseExecutor)
     transfer_manager = TransferManager(self.client,
                                        executor_cls=mocked_executor_cls)
     transfer_manager.delete('bucket', 'key')
     self.assertTrue(mocked_executor_cls.return_value.submit.called)
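# A sketch of a custom executor built on NonThreadedExecutor, which is
# assumed to live in ibm_s3transfer.futures as in upstream s3transfer;
# any class satisfying the BaseExecutor interface can be passed as
# executor_cls. Assumes `client` and TransferManager are in scope.
from ibm_s3transfer.futures import NonThreadedExecutor

class LoggingExecutor(NonThreadedExecutor):
    def submit(self, fn, *args, **kwargs):
        # Log each task before delegating to the real executor.
        print('submitting %r' % fn)
        return super(LoggingExecutor, self).submit(fn, *args, **kwargs)

manager = TransferManager(client, executor_cls=LoggingExecutor)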
 def setUp(self):
     super(TestDeleteObject, self).setUp()
     self.bucket = 'mybucket'
     self.key = 'mykey'
     self.manager = TransferManager(self.client)
Example #17
 def setUp(self):
     self.client = mock.Mock()
     self.manager = mock.Mock(TransferManager(self.client))
     self.transfer = S3Transfer(manager=self.manager)
     self.callback = mock.Mock()
Example #18
 def create_transfer_manager(self, config=None):
     return TransferManager(self.client, config=config)
 def setUp(self):
     super(TestRangedDownload, self).setUp()
     self.config = TransferConfig(max_request_concurrency=1,
                                  multipart_threshold=1,
                                  multipart_chunksize=4)
     self._manager = TransferManager(self.client, self.config)
 def test_unicode_exception_in_context_manager(self):
     with self.assertRaises(ArbitraryException):
         with TransferManager(self.client):
             raise ArbitraryException(u'\u2713')
Example #21
def create_transfer_manager(credentials):
    cos_client = create_client(credentials)
    return TransferManager(cos_client)
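# Usage sketch of the factory above; `credentials` is whatever the
# create_client() helper expects.
manager = create_transfer_manager(credentials)
with manager:
    manager.upload('/tmp/myfile', 'mybucket', 'mykey').result()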