def _submit_transfer(self, call_args, submission_task_cls,
                     extra_main_kwargs=None):
    if not extra_main_kwargs:
        extra_main_kwargs = {}

    # Create a TransferFuture to return back to the user
    transfer_future, components = self._get_future_with_components(
        call_args)

    # Add any provided done callbacks to the created transfer future
    # to be invoked on the transfer future being complete.
    for callback in get_callbacks(transfer_future, 'done'):
        components['coordinator'].add_done_callback(callback)

    # Get the main kwargs needed to instantiate the submission task
    main_kwargs = self._get_submission_task_main_kwargs(
        transfer_future, extra_main_kwargs)

    # Submit a SubmissionTask that will submit all of the necessary
    # tasks needed to complete the S3 transfer.
    self._submission_executor.submit(
        submission_task_cls(
            transfer_coordinator=components['coordinator'],
            main_kwargs=main_kwargs
        )
    )

    # Increment the unique id counter for future transfer requests
    self._id_counter += 1

    return transfer_future
def test_cleanups_only_ran_once_on_exception(self):
    # We want to be able to handle the case where the final task completes
    # and announces done but there is an error in the submission task
    # which will cause it to need to announce done as well. In this case,
    # we do not want the done callbacks to be invoked more than once.
    final_task = self.get_task(FailureTask, is_final=True)
    self.main_kwargs['executor'] = self.executor
    self.main_kwargs['tasks_to_submit'] = [final_task]

    submission_task = self.get_task(
        ExceptionSubmissionTask, main_kwargs=self.main_kwargs)

    subscriber = RecordingSubscriber()
    self.call_args.subscribers.append(subscriber)

    # Add the done callback to the callbacks to be invoked when the
    # transfer is done.
    done_callbacks = get_callbacks(self.transfer_future, 'done')
    for done_callback in done_callbacks:
        self.transfer_coordinator.add_done_callback(done_callback)

    submission_task()

    # Make sure the task failed to start
    self.assertEqual(self.transfer_coordinator.status, 'failed')

    # Make sure the on_done callback of the subscriber is called only once.
    self.assertEqual(
        subscriber.on_done_calls, [{'future': self.transfer_future}])
def yield_upload_part_bodies(self, transfer_future, config):
    part_size = config.multipart_chunksize
    full_file_size = transfer_future.meta.size
    num_parts = self._get_num_parts(transfer_future, part_size)
    callbacks = get_callbacks(transfer_future, 'progress')
    for part_number in range(1, num_parts + 1):
        start_byte = part_size * (part_number - 1)
        # Get a file-like object for that part along with the full size
        # of the underlying file for that file-like object.
        fileobj, full_size = self._get_upload_part_fileobj_with_full_size(
            transfer_future.meta.call_args.fileobj, start_byte=start_byte,
            part_size=part_size, full_file_size=full_file_size)

        # Wrap fileobj with interrupt reader that will quickly cancel
        # uploads if needed instead of having to wait for the socket
        # to completely read all of the data.
        fileobj = self._wrap_with_interrupt_reader(fileobj)

        # Wrap the file-like object into a ReadFileChunk to get progress.
        read_file_chunk = self._osutil.open_file_chunk_reader_from_fileobj(
            fileobj=fileobj, chunk_size=part_size,
            full_file_size=full_size, callbacks=callbacks)
        yield part_number, read_file_chunk
def test_cleanups_only_ran_once_on_exception(self):
    # We want to be able to handle the case where the final task completes
    # and announces done but there is an error in the submission task
    # which will cause it to need to announce done as well. In this case,
    # we do not want the done callbacks to be invoked more than once.
    final_task = self.get_task(FailureTask, is_final=True)
    self.main_kwargs['executor'] = self.executor
    self.main_kwargs['tasks_to_submit'] = [final_task]

    submission_task = self.get_task(ExceptionSubmissionTask,
                                    main_kwargs=self.main_kwargs)

    subscriber = RecordingSubscriber()
    self.call_args.subscribers.append(subscriber)

    # Add the done callback to the callbacks to be invoked when the
    # transfer is done.
    done_callbacks = get_callbacks(self.transfer_future, 'done')
    for done_callback in done_callbacks:
        self.transfer_coordinator.add_done_callback(done_callback)

    submission_task()

    # Make sure the task failed to start
    self.assertEqual(self.transfer_coordinator.status, 'failed')

    # Make sure the on_done callback of the subscriber is called only once.
    self.assertEqual(subscriber.on_done_calls, [{
        'future': self.transfer_future
    }])
def _submit_transfer(self, call_args, submission_task_cls,
                     extra_main_kwargs=None):
    if not extra_main_kwargs:
        extra_main_kwargs = {}

    # Create a TransferFuture to return back to the user
    transfer_future, components = self._get_future_with_components(
        call_args)

    # Add any provided done callbacks to the created transfer future
    # to be invoked on the transfer future being complete.
    for callback in get_callbacks(transfer_future, 'done'):
        components['coordinator'].add_done_callback(callback)

    # Get the main kwargs needed to instantiate the submission task
    main_kwargs = self._get_submission_task_main_kwargs(
        transfer_future, extra_main_kwargs)

    # Submit a SubmissionTask that will submit all of the necessary
    # tasks needed to complete the S3 transfer.
    self._submission_executor.submit(
        submission_task_cls(
            transfer_coordinator=components['coordinator'],
            main_kwargs=main_kwargs))

    # Increment the unique id counter for future transfer requests
    self._id_counter += 1

    return transfer_future
def _get_progress_callbacks(self, transfer_future):
    callbacks = get_callbacks(transfer_future, 'progress')
    # Only wrap the callbacks if there are actually callbacks to invoke;
    # otherwise skip the unnecessary work of wrapping an empty list.
    if callbacks:
        return [AggregatedProgressCallback(callbacks)]
    return []
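# The guarded variant above wraps all progress callbacks in a single
# AggregatedProgressCallback so per-chunk progress updates can be
# batched before fanning out to subscribers. Below is a minimal toy
# sketch of that aggregation pattern (illustrative only, not
# s3transfer's actual class; the byte threshold used here is an
# assumption for demonstration purposes):
class ToyAggregatedProgressCallback:
    def __init__(self, callbacks, threshold=1024 * 1024):
        self._callbacks = callbacks
        self._threshold = threshold
        self._bytes_seen = 0

    def __call__(self, bytes_transferred):
        # Accumulate instead of invoking every wrapped callback on each
        # chunk of transferred bytes.
        self._bytes_seen += bytes_transferred
        if self._bytes_seen >= self._threshold:
            self.flush()

    def flush(self):
        # Emit one aggregated update to every wrapped callback.
        if self._bytes_seen > 0:
            for callback in self._callbacks:
                callback(bytes_transferred=self._bytes_seen)
            self._bytes_seen = 0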
def _submit_ranged_download_request(self, client, config, osutil,
                                    request_executor, io_executor,
                                    download_output_manager,
                                    transfer_future):
    call_args = transfer_future.meta.call_args

    # Get the needed progress callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Get a handle to the file that will be used for writing downloaded
    # contents
    fileobj = download_output_manager.get_fileobj_for_io_writes(
        transfer_future)

    # Determine the number of parts
    part_size = config.multipart_chunksize
    num_parts = int(
        math.ceil(transfer_future.meta.size / float(part_size)))

    # Get any associated tags for the get object task.
    get_object_tag = download_output_manager.get_download_task_tag()

    ranged_downloads = []
    for i in range(num_parts):
        # Calculate the range parameter
        range_parameter = calculate_range_parameter(
            part_size, i, num_parts)

        # Inject the Range parameter to the parameters to be passed in
        # as extra args
        extra_args = {'Range': range_parameter}
        extra_args.update(call_args.extra_args)
        # Submit the ranged downloads
        ranged_downloads.append(
            self._transfer_coordinator.submit(
                request_executor,
                GetObjectTask(
                    transfer_coordinator=self._transfer_coordinator,
                    main_kwargs={
                        'client': client,
                        'bucket': call_args.bucket,
                        'key': call_args.key,
                        'fileobj': fileobj,
                        'extra_args': extra_args,
                        'callbacks': progress_callbacks,
                        'max_attempts': config.num_download_attempts,
                        'start_index': i * part_size,
                        'download_output_manager': download_output_manager,
                        'io_chunksize': config.io_chunksize,
                    }
                ),
                tag=get_object_tag
            )
        )
    # Send the necessary tasks to complete the download.
    self._complete_download(
        request_executor, io_executor, download_output_manager,
        ranged_downloads)
def _submit_ranged_download_request(self, client, config, osutil,
                                    request_executor, io_executor,
                                    download_output_manager,
                                    transfer_future):
    call_args = transfer_future.meta.call_args

    # Get the needed progress callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Get a handle to the file that will be used for writing downloaded
    # contents
    fileobj = download_output_manager.get_fileobj_for_io_writes(
        transfer_future)

    # Determine the number of parts
    part_size = config.multipart_chunksize
    num_parts = int(
        math.ceil(transfer_future.meta.size / float(part_size)))

    # Get any associated tags for the get object task.
    get_object_tag = download_output_manager.get_download_task_tag()

    # Callback invoker to submit the final io task once all downloads
    # are complete.
    finalize_download_invoker = CountCallbackInvoker(
        self._get_final_io_task_submission_callback(
            download_output_manager, io_executor))
    for i in range(num_parts):
        # Calculate the range parameter
        range_parameter = calculate_range_parameter(
            part_size, i, num_parts)

        # Inject the Range parameter to the parameters to be passed in
        # as extra args
        extra_args = {'Range': range_parameter}
        extra_args.update(call_args.extra_args)
        finalize_download_invoker.increment()
        # Submit the ranged downloads
        self._transfer_coordinator.submit(
            request_executor,
            GetObjectTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'fileobj': fileobj,
                    'extra_args': extra_args,
                    'callbacks': progress_callbacks,
                    'max_attempts': config.num_download_attempts,
                    'start_index': i * part_size,
                    'download_output_manager': download_output_manager,
                    'io_chunksize': config.io_chunksize,
                },
                done_callbacks=[finalize_download_invoker.decrement]),
            tag=get_object_tag)
    finalize_download_invoker.finalize()
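# The ranged-download variant above fans out one GetObjectTask per part
# and uses a CountCallbackInvoker so the final io task is only submitted
# after every part's done callback has fired. A minimal sketch of that
# reference-counting pattern follows (an illustration of the idea, not
# s3transfer's exact class; a real implementation would also need a
# lock for thread safety):
class ToyCountCallbackInvoker:
    def __init__(self, callback):
        self._callback = callback
        self._count = 0
        self._is_finalized = False

    def increment(self):
        # Called once for each task that gets submitted.
        self._count += 1

    def decrement(self):
        # Called from each task's done callback. The final callback only
        # fires once finalize() has run and the count drains to zero.
        self._count -= 1
        if self._count == 0 and self._is_finalized:
            self._callback()

    def finalize(self):
        # Called after all submissions, guarding against invoking the
        # callback early if tasks complete while submission is ongoing.
        self._is_finalized = True
        if self._count == 0:
            self._callback()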
def get_put_object_body(self, transfer_future):
    callbacks = get_callbacks(transfer_future, 'progress')
    fileobj = transfer_future.meta.call_args.fileobj

    body = self._wrap_data(self._initial_data + fileobj.read(), callbacks)

    # Zero out the stored data so we don't have additional copies
    # hanging around in memory.
    self._initial_data = None
    return body
def get_put_object_body(self, transfer_future):
    callbacks = get_callbacks(transfer_future, 'progress')
    fileobj = transfer_future.meta.call_args.fileobj

    body = self._wrap_data(
        self._initial_data + fileobj.read(), callbacks)

    # Zero out the stored data so we don't have additional copies
    # hanging around in memory.
    self._initial_data = None
    return body
def test_get_callbacks(self):
    callbacks = get_callbacks(self.transfer_future, 'queued')
    # Make sure two callbacks were added as both subscribers had
    # an on_queued method.
    self.assertEqual(len(callbacks), 2)

    # Ensure that the callback was injected with the future by calling
    # one of them and checking that the future was used in the call.
    callbacks[0]()
    self.assertEqual(
        self.subscriber.on_queued_calls,
        [{'future': self.transfer_future}]
    )
def test_get_callbacks(self):
    callbacks = get_callbacks(self.transfer_future, 'queued')
    # Make sure two callbacks were added as both subscribers had
    # an on_queued method.
    self.assertEqual(len(callbacks), 2)

    # Ensure that the callback was injected with the future by calling
    # one of them and checking that the future was used in the call.
    callbacks[0]()
    self.assertEqual(self.subscriber.on_queued_calls, [{
        'future': self.transfer_future
    }])
def invoke_all_callbacks(*args, **kwargs):
    callbacks_list = []
    if before_subscribers is not None:
        callbacks_list += before_subscribers
    callbacks_list += get_callbacks(future, callback_type)
    if after_subscribers is not None:
        callbacks_list += after_subscribers
    for callback in callbacks_list:
        # The get_callbacks helper will set the first argument
        # by keyword, so the other arguments need to be set by
        # keyword as well.
        if callback_type == "progress":
            callback(bytes_transferred=args[0])
        else:
            callback(*args, **kwargs)
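# invoke_all_callbacks relies on get_callbacks having already bound the
# transfer future into each subscriber callback by keyword, which is
# why the remaining arguments cannot be passed positionally. A
# plausible sketch of that helper, consistent with the tests in this
# file (hedged; the shipped implementation may differ in detail):
import functools

def sketch_get_callbacks(transfer_future, callback_type):
    callbacks = []
    for subscriber in transfer_future.meta.call_args.subscribers:
        callback_name = 'on_' + callback_type
        # Subscribers that do not implement the hook are skipped, which
        # is why looking up a made-up type like 'fake_state' yields an
        # empty list.
        if hasattr(subscriber, callback_name):
            callbacks.append(
                functools.partial(
                    getattr(subscriber, callback_name),
                    future=transfer_future))
    return callbacks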
def yield_upload_part_bodies(self, transfer_future, config):
    part_size = config.multipart_chunksize
    file_object = transfer_future.meta.call_args.fileobj
    callbacks = get_callbacks(transfer_future, 'progress')
    part_number = 0

    # Continue reading parts from the file-like object until it is empty.
    while True:
        part_number += 1
        part_content = self._read(file_object, part_size)
        if not part_content:
            break
        part_object = self._wrap_data(part_content, callbacks)

        # Zero out part_content to avoid hanging on to additional data.
        part_content = None
        yield part_number, part_object
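# Usage sketch for the streamed variant of yield_upload_part_bodies
# above: the input is a non-seekable file-like object with no known
# size, so parts are materialized one at a time until read() returns an
# empty result. The toy loop below demonstrates the same exhaustion
# pattern with a plain BytesIO (the helper name is illustrative only):
import io

def iter_parts(fileobj, part_size):
    part_number = 0
    while True:
        part_number += 1
        part_content = fileobj.read(part_size)
        if not part_content:
            break
        yield part_number, part_content

# A 10-byte stream read in 4-byte parts yields parts 1, 2, and 3.
stream = io.BytesIO(b'x' * 10)
assert [n for n, _ in iter_parts(stream, 4)] == [1, 2, 3]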
def _main(self, transfer_future, **kwargs):
    """
    :type transfer_future: s3transfer.futures.TransferFuture
    :param transfer_future: The transfer future associated with the
        transfer request that tasks are being submitted for

    :param kwargs: Any additional kwargs that you may want to pass
        to the _submit() method
    """
    try:
        self._transfer_coordinator.set_status_to_queued()

        # Before submitting any tasks, run all of the on_queued callbacks
        on_queued_callbacks = get_callbacks(transfer_future, 'queued')
        for on_queued_callback in on_queued_callbacks:
            on_queued_callback()

        # Once the callbacks have been run, set the status to running.
        self._transfer_coordinator.set_status_to_running()

        # Call the submit method to start submitting tasks to execute the
        # transfer.
        self._submit(transfer_future=transfer_future, **kwargs)
    except BaseException as e:
        # If an exception was raised during the submission of a task,
        # there is a chance that the final task, which signals that the
        # transfer is done and runs the cleanup, was never submitted in
        # the first place, so we need to account for that accordingly.
        #
        # Note that BaseException is caught, instead of Exception, because
        # for some implementations of executors, specifically the serial
        # implementation, the SubmissionTask is directly exposed to
        # KeyboardInterrupts and so needs to clean up and signal done
        # for those as well.

        # Set the exception that caused the process to fail.
        self._log_and_set_exception(e)

        # Wait for all futures that may have spawned from this submission
        # task to finish before we announce the transfer as done.
        self._wait_for_all_submitted_futures_to_complete()

        # Announce the transfer as done, which will run any cleanups
        # and done callbacks as well.
        self._transfer_coordinator.announce_done()
def _main(self, transfer_future, **kwargs):
    """
    :type transfer_future: s3transfer.futures.TransferFuture
    :param transfer_future: The transfer future associated with the
        transfer request that tasks are being submitted for

    :param kwargs: Any additional kwargs that you may want to pass
        to the _submit() method
    """
    try:
        self._transfer_coordinator.set_status_to_queued()

        # Before submitting any tasks, run all of the on_queued callbacks
        on_queued_callbacks = get_callbacks(transfer_future, 'queued')
        for on_queued_callback in on_queued_callbacks:
            on_queued_callback()

        # Once the callbacks have been run, set the status to running.
        self._transfer_coordinator.set_status_to_running()

        # Call the submit method to start submitting tasks to execute the
        # transfer.
        self._submit(transfer_future=transfer_future, **kwargs)
    except BaseException as e:
        # If an exception was raised during the submission of a task,
        # there is a chance that the final task, which signals that the
        # transfer is done and runs the cleanup, was never submitted in
        # the first place, so we need to account for that accordingly.
        #
        # Note that BaseException is caught, instead of Exception, because
        # for some implementations of executors, specifically the serial
        # implementation, the SubmissionTask is directly exposed to
        # KeyboardInterrupts and so needs to clean up and signal done
        # for those as well.

        # Set the exception that caused the process to fail.
        self._log_and_set_exception(e)

        # Wait for all futures that may have spawned from this submission
        # task to finish before we announce the transfer as done.
        self._wait_for_all_submitted_futures_to_complete()

        # Announce the transfer as done, which will run any cleanups
        # and done callbacks as well.
        self._transfer_coordinator.announce_done()
def get_put_object_body(self, transfer_future):
    # Get a file-like object for the given input
    fileobj = self._get_put_object_fileobj(
        transfer_future.meta.call_args.fileobj)

    # Wrap fileobj with interrupt reader that will quickly cancel
    # uploads if needed instead of having to wait for the socket
    # to completely read all of the data.
    fileobj = self._wrap_with_interrupt_reader(fileobj)

    callbacks = get_callbacks(transfer_future, 'progress')
    size = transfer_future.meta.size
    # Return the file-like object wrapped into a ReadFileChunk to get
    # progress.
    return self._osutil.open_file_chunk_reader_from_fileobj(
        fileobj=fileobj, chunk_size=size, full_file_size=size,
        callbacks=callbacks)
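# The upload paths above wrap the file-like object with an interrupt
# reader so a cancelled transfer stops reading promptly instead of
# streaming the remainder of the body through the socket. A minimal
# sketch of the idea (illustrative, not s3transfer's actual class),
# assuming the coordinator exposes the exception that cancelled the
# transfer:
class ToyInterruptReader:
    def __init__(self, fileobj, transfer_coordinator):
        self._fileobj = fileobj
        self._transfer_coordinator = transfer_coordinator

    def read(self, amount=None):
        # Check for cancellation before every read so a large upload
        # bails out at the next chunk boundary.
        if self._transfer_coordinator.exception:
            raise self._transfer_coordinator.exception
        return self._fileobj.read(amount)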
def _submit_download_request(self, client, config, osutil,
                             request_executor, io_executor,
                             download_output_manager, transfer_future):
    call_args = transfer_future.meta.call_args

    # Get a handle to the file that will be used for writing downloaded
    # contents
    fileobj = download_output_manager.get_fileobj_for_io_writes(
        transfer_future)

    # Get the needed callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Get any associated tags for the get object task.
    get_object_tag = download_output_manager.get_download_task_tag()

    # Callback invoker to submit the final io task once the download
    # is complete.
    finalize_download_invoker = CountCallbackInvoker(
        self._get_final_io_task_submission_callback(
            download_output_manager, io_executor))
    finalize_download_invoker.increment()

    # Submit the task to download the object.
    self._transfer_coordinator.submit(
        request_executor,
        GetObjectTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'fileobj': fileobj,
                'extra_args': call_args.extra_args,
                'callbacks': progress_callbacks,
                'max_attempts': config.num_download_attempts,
                'download_output_manager': download_output_manager,
                'io_chunksize': config.io_chunksize,
            },
            done_callbacks=[finalize_download_invoker.decrement]),
        tag=get_object_tag)
    finalize_download_invoker.finalize()
def test_calls_done_callbacks_on_exception(self):
    submission_task = self.get_task(
        ExceptionSubmissionTask, main_kwargs=self.main_kwargs)

    subscriber = RecordingSubscriber()
    self.call_args.subscribers.append(subscriber)

    # Add the done callback to the callbacks to be invoked when the
    # transfer is done.
    done_callbacks = get_callbacks(self.transfer_future, 'done')
    for done_callback in done_callbacks:
        self.transfer_coordinator.add_done_callback(done_callback)

    submission_task()

    # Make sure the task failed to start
    self.assertEqual(self.transfer_coordinator.status, 'failed')

    # Make sure the on_done callback of the subscriber is called.
    self.assertEqual(
        subscriber.on_done_calls, [{'future': self.transfer_future}])
def _submit_download_request(self, client, config, osutil,
                             request_executor, io_executor,
                             download_output_manager, transfer_future,
                             bandwidth_limiter):
    call_args = transfer_future.meta.call_args

    # Get a handle to the file that will be used for writing downloaded
    # contents
    fileobj = download_output_manager.get_fileobj_for_io_writes(
        transfer_future)

    # Get the needed callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Get any associated tags for the get object task.
    get_object_tag = download_output_manager.get_download_task_tag()

    # Get the final io task to run once the download is complete.
    final_task = download_output_manager.get_final_io_task()

    # Submit the task to download the object.
    self._transfer_coordinator.submit(
        request_executor,
        ImmediatelyWriteIOGetObjectTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'fileobj': fileobj,
                'extra_args': call_args.extra_args,
                'callbacks': progress_callbacks,
                'max_attempts': config.num_download_attempts,
                'download_output_manager': download_output_manager,
                'io_chunksize': config.io_chunksize,
                'bandwidth_limiter': bandwidth_limiter
            },
            done_callbacks=[final_task]
        ),
        tag=get_object_tag
    )
def _submit_copy_request(self, client, config, osutil, request_executor,
                         transfer_future):
    call_args = transfer_future.meta.call_args

    # Get the needed progress callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Submit the request of a single copy.
    self._transfer_coordinator.submit(
        request_executor,
        CopyObjectTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'copy_source': call_args.copy_source,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'extra_args': call_args.extra_args,
                'callbacks': progress_callbacks,
                'size': transfer_future.meta.size
            },
            is_final=True))
def _submit_download_request(self, client, config, osutil,
                             request_executor, io_executor,
                             download_output_manager, transfer_future):
    call_args = transfer_future.meta.call_args

    # Get a handle to the file that will be used for writing downloaded
    # contents
    fileobj = download_output_manager.get_fileobj_for_io_writes(
        transfer_future)

    # Get the needed callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Get any associated tags for the get object task.
    get_object_tag = download_output_manager.get_download_task_tag()

    # Submit the task to download the object.
    download_future = self._transfer_coordinator.submit(
        request_executor,
        GetObjectTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'fileobj': fileobj,
                'extra_args': call_args.extra_args,
                'callbacks': progress_callbacks,
                'max_attempts': config.num_download_attempts,
                'download_output_manager': download_output_manager,
                'io_chunksize': config.io_chunksize,
            }
        ),
        tag=get_object_tag
    )

    # Send the necessary tasks to complete the download.
    self._complete_download(
        request_executor, io_executor, download_output_manager,
        [download_future])
def _submit_download_request(self, client, config, osutil,
                             request_executor, io_executor,
                             download_output_manager, transfer_future):
    call_args = transfer_future.meta.call_args

    # Get a handle to the file that will be used for writing downloaded
    # contents
    fileobj = download_output_manager.get_fileobj_for_io_writes(
        transfer_future)

    # Get the needed callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Get any associated tags for the get object task.
    get_object_tag = download_output_manager.get_download_task_tag()

    # Submit the task to download the object.
    download_future = self._transfer_coordinator.submit(
        request_executor,
        GetObjectTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'fileobj': fileobj,
                'extra_args': call_args.extra_args,
                'callbacks': progress_callbacks,
                'max_attempts': config.num_download_attempts,
                'download_output_manager': download_output_manager,
                'io_chunksize': config.io_chunksize,
            }),
        tag=get_object_tag)

    # Send the necessary tasks to complete the download.
    self._complete_download(request_executor, io_executor,
                            download_output_manager, [download_future])
def _submit_copy_request(self, client, config, osutil, request_executor,
                         transfer_future):
    call_args = transfer_future.meta.call_args

    # Get the needed progress callbacks for the task
    progress_callbacks = get_callbacks(transfer_future, 'progress')

    # Submit the request of a single copy.
    self._transfer_coordinator.submit(
        request_executor,
        CopyObjectTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'copy_source': call_args.copy_source,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'extra_args': call_args.extra_args,
                'callbacks': progress_callbacks,
                'size': transfer_future.meta.size
            },
            is_final=True
        )
    )
def _submit_multipart_request(self, client, config, osutil,
                              request_executor, transfer_future):
    call_args = transfer_future.meta.call_args

    # Submit the request to create a multipart upload and make sure it
    # does not include any of the arguments used for copy part.
    create_multipart_extra_args = {}
    for param, val in call_args.extra_args.items():
        if param not in self.CREATE_MULTIPART_ARGS_BLACKLIST:
            create_multipart_extra_args[param] = val

    create_multipart_future = self._transfer_coordinator.submit(
        request_executor,
        CreateMultipartUploadTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'extra_args': create_multipart_extra_args,
            }
        )
    )

    # Determine how many parts are needed based on filesize and
    # desired chunksize.
    part_size = config.multipart_chunksize
    adjuster = ChunksizeAdjuster()
    part_size = adjuster.adjust_chunksize(
        part_size, transfer_future.meta.size)
    num_parts = int(
        math.ceil(transfer_future.meta.size / float(part_size)))

    # Submit requests to upload the parts of the file.
    part_futures = []
    progress_callbacks = get_callbacks(transfer_future, 'progress')
    for part_number in range(1, num_parts + 1):
        extra_part_args = self._extra_upload_part_args(
            call_args.extra_args)
        # The part number for upload part starts at 1 while the
        # range parameter starts at zero, so just subtract 1 off of
        # the part number
        extra_part_args['CopySourceRange'] = calculate_range_parameter(
            part_size, part_number - 1, num_parts,
            transfer_future.meta.size)
        # Get the size of the part copy as well for the progress
        # callbacks.
        size = self._get_transfer_size(
            part_size, part_number - 1, num_parts,
            transfer_future.meta.size)
        part_futures.append(
            self._transfer_coordinator.submit(
                request_executor,
                CopyPartTask(
                    transfer_coordinator=self._transfer_coordinator,
                    main_kwargs={
                        'client': client,
                        'copy_source': call_args.copy_source,
                        'bucket': call_args.bucket,
                        'key': call_args.key,
                        'part_number': part_number,
                        'extra_args': extra_part_args,
                        'callbacks': progress_callbacks,
                        'size': size
                    },
                    pending_main_kwargs={
                        'upload_id': create_multipart_future
                    }
                )
            )
        )

    complete_multipart_extra_args = self._extra_complete_multipart_args(
        call_args.extra_args)
    # Submit the request to complete the multipart upload.
    self._transfer_coordinator.submit(
        request_executor,
        CompleteMultipartUploadTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'extra_args': complete_multipart_extra_args,
            },
            pending_main_kwargs={
                'upload_id': create_multipart_future,
                'parts': part_futures
            },
            is_final=True
        )
    )
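# Worked example of the range arithmetic used by the copy-part loops
# above. The sketch below reconstructs the presumed behavior of
# calculate_range_parameter from how it is called (part indices start
# at zero; the last part is capped at the object's total size when
# known); treat it as an illustration rather than the library's exact
# implementation.
def sketch_range_parameter(part_size, part_index, num_parts,
                           total_size=None):
    start = part_index * part_size
    if part_index == num_parts - 1:
        # Last part: open-ended unless the total size is known.
        end = str(total_size - 1) if total_size is not None else ''
    else:
        end = str(start + part_size - 1)
    return 'bytes=%s-%s' % (start, end)

# With 5 MiB parts, the second part of a 12 MiB object covers bytes
# 5242880-10485759, and the final part is truncated at byte 12582911.
assert sketch_range_parameter(5 * 1024 * 1024, 1, 3) == \
    'bytes=5242880-10485759'
assert sketch_range_parameter(
    5 * 1024 * 1024, 2, 3, 12 * 1024 * 1024) == 'bytes=10485760-12582911'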
def _get_progress_callbacks(self, transfer_future):
    callbacks = get_callbacks(transfer_future, 'progress')
    # AggregatedProgressCallback expects a list of callbacks, so wrap
    # each callback in its own single-element list.
    return [
        AggregatedProgressCallback([callback]) for callback in callbacks]
def test_get_callbacks_for_missing_type(self):
    callbacks = get_callbacks(self.transfer_future, 'fake_state')
    # There should be no callbacks as the subscribers will not have the
    # on_fake_state method
    self.assertEqual(len(callbacks), 0)
def _submit_multipart_request(self, client, config, osutil,
                              request_executor, transfer_future):
    call_args = transfer_future.meta.call_args

    # Submit the request to create a multipart upload and make sure it
    # does not include any of the arguments used for copy part.
    create_multipart_extra_args = {}
    for param, val in call_args.extra_args.items():
        if param not in self.CREATE_MULTIPART_ARGS_BLACKLIST:
            create_multipart_extra_args[param] = val

    create_multipart_future = self._transfer_coordinator.submit(
        request_executor,
        CreateMultipartUploadTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key,
                'extra_args': create_multipart_extra_args,
            }
        )
    )

    # Determine how many parts are needed based on filesize and
    # desired chunksize.
    part_size = config.multipart_chunksize
    num_parts = int(
        math.ceil(transfer_future.meta.size / float(part_size)))

    # Submit requests to upload the parts of the file.
    part_futures = []
    progress_callbacks = get_callbacks(transfer_future, 'progress')
    for part_number in range(1, num_parts + 1):
        extra_part_args = self._extra_upload_part_args(
            call_args.extra_args)
        # The part number for upload part starts at 1 while the
        # range parameter starts at zero, so just subtract 1 off of
        # the part number
        extra_part_args['CopySourceRange'] = calculate_range_parameter(
            part_size, part_number - 1, num_parts,
            transfer_future.meta.size)
        # Get the size of the part copy as well for the progress
        # callbacks.
        size = self._get_transfer_size(
            part_size, part_number - 1, num_parts,
            transfer_future.meta.size)
        part_futures.append(
            self._transfer_coordinator.submit(
                request_executor,
                CopyPartTask(
                    transfer_coordinator=self._transfer_coordinator,
                    main_kwargs={
                        'client': client,
                        'copy_source': call_args.copy_source,
                        'bucket': call_args.bucket,
                        'key': call_args.key,
                        'part_number': part_number,
                        'extra_args': extra_part_args,
                        'callbacks': progress_callbacks,
                        'size': size
                    },
                    pending_main_kwargs={
                        'upload_id': create_multipart_future
                    }
                )
            )
        )

    # Submit the request to complete the multipart upload.
    self._transfer_coordinator.submit(
        request_executor,
        CompleteMultipartUploadTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'client': client,
                'bucket': call_args.bucket,
                'key': call_args.key
            },
            pending_main_kwargs={
                'upload_id': create_multipart_future,
                'parts': part_futures
            },
            is_final=True
        )
    )