def _enqueue_tasks(self, files):
    total_files = 0
    total_parts = 0
    for filename in files:
        num_uploads = 1
        # If uploading a stream, it is required to read from the stream
        # to determine if the stream needs to be multipart uploaded.
        payload = None
        if filename.operation_name == 'upload':
            payload, is_multipart_task = \
                self._pull_from_stream(self.multi_threshold)
        else:
            # Set the file size for the ``FileInfo`` object since
            # streams do not use a ``FileGenerator`` that usually
            # determines the size.
            filename.set_size_from_s3()
            is_multipart_task = self._is_multipart_task(filename)
        if is_multipart_task and not self.params['dryrun']:
            # If we're in dryrun mode, then we don't need the
            # real multipart tasks.  We can just use a BasicTask
            # in the else clause below, which will print out the
            # fact that it's transferring a file rather than
            # the specific part tasks required to perform the
            # transfer.
            num_uploads = self._enqueue_multipart_tasks(filename, payload)
        else:
            task = tasks.BasicTask(session=self.session,
                                   filename=filename,
                                   parameters=self.params,
                                   result_queue=self.result_queue,
                                   payload=payload)
            self.executor.submit(task)
        total_files += 1
        total_parts += num_uploads
    return total_files, total_parts
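
# Illustration only: a minimal sketch of how a ``_pull_from_stream``-style
# helper could decide between single-part and multipart uploads for stdin
# streams, as the comment in the stream variant above describes.  The
# function name, the ``stream`` parameter, and the use of ``io.BytesIO``
# are assumptions for this sketch, not the AWS CLI's implementation.
import io


def pull_from_stream_sketch(stream, amount_requested):
    """Read up to ``amount_requested`` bytes and report whether more remain.

    Returns ``(payload, needs_multipart)``: ``payload`` wraps the bytes
    read so far, and ``needs_multipart`` is True if the stream holds more
    data than the single-part threshold.
    """
    # Read one extra byte so we can tell whether the stream continues
    # past the threshold without buffering the whole input.
    chunk = stream.read(amount_requested + 1)
    needs_multipart = len(chunk) > amount_requested
    return io.BytesIO(chunk), needs_multipart


# Example: an 11-byte stream with an 8-byte threshold requires multipart.
_payload, _multipart = pull_from_stream_sketch(io.BytesIO(b'hello world'), 8)
assert _multipart is True
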
def _enqueue_tasks(self, files):
    total_files = 0
    total_parts = 0
    for filename in files:
        num_uploads = 1
        is_multipart_task = self._is_multipart_task(filename)
        too_large = False
        if hasattr(filename, 'size'):
            too_large = filename.size > MAX_UPLOAD_SIZE
        if too_large and filename.operation_name == 'upload':
            warning_message = "File exceeds s3 upload limit of 5 TB."
            warning = create_warning(relative_path(filename.src),
                                     message=warning_message)
            self.result_queue.put(warning)
        elif is_multipart_task and not self.params['dryrun']:
            # If we're in dryrun mode, then we don't need the
            # real multipart tasks.  We can just use a BasicTask
            # in the else clause below, which will print out the
            # fact that it's transferring a file rather than
            # the specific part tasks required to perform the
            # transfer.
            num_uploads = self._enqueue_multipart_tasks(filename)
        else:
            task = tasks.BasicTask(session=self.session,
                                   filename=filename,
                                   parameters=self.params,
                                   result_queue=self.result_queue)
            self.executor.submit(task)
        total_files += 1
        total_parts += num_uploads
    return total_files, total_parts
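
# Illustration only: a minimal sketch of the size checks the loop above
# relies on.  The 8 MB single-part cutoff and the helper names are
# assumptions for this sketch; the 5 TB value mirrors the warning text
# above, but the real CLI takes both limits from its own constants.
MULTI_THRESHOLD_SKETCH = 8 * 1024 * 1024   # single-part upload cutoff (assumed)
MAX_UPLOAD_SIZE_SKETCH = 5 * 1024 ** 4     # 5 TB S3 object size ceiling


def is_multipart_task_sketch(size):
    """Return True when a file of ``size`` bytes should be split into parts."""
    return size > MULTI_THRESHOLD_SKETCH


def exceeds_upload_limit_sketch(size):
    """Return True when a single object is too large to upload at all."""
    return size > MAX_UPLOAD_SIZE_SKETCH


# Example: a 20 MB file is a multipart task but well under the 5 TB limit.
assert is_multipart_task_sketch(20 * 1024 * 1024)
assert not exceeds_upload_limit_sketch(20 * 1024 * 1024)
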
def _enqueue_tasks(self, files):
    total_files = 0
    total_parts = 0
    for filename in files:
        num_uploads = 1
        is_multipart_task = self._is_multipart_task(filename)
        too_large = False
        if hasattr(filename, 'size'):
            too_large = filename.size > MAX_UPLOAD_SIZE
        if too_large and filename.operation_name == 'upload':
            warning_message = "File exceeds s3 upload limit of 5 TB."
            warning = create_warning(relative_path(filename.src),
                                     warning_message)
            self.result_queue.put(warning)
        # Warn about and skip over glacier-incompatible tasks.
        elif not self.params.get('force_glacier_transfer') and \
                not filename.is_glacier_compatible():
            LOGGER.debug(
                'Encountered glacier object s3://%s. Not performing '
                '%s on object.' % (filename.src, filename.operation_name))
            if not self.params['ignore_glacier_warnings']:
                warning = create_warning(
                    's3://' + filename.src,
                    'Object is of storage class GLACIER. Unable to '
                    'perform %s operations on GLACIER objects. You must '
                    'restore the object to be able to perform the '
                    'operation.' % filename.operation_name
                )
                self.result_queue.put(warning)
            continue
        elif is_multipart_task and not self.params['dryrun']:
            # If we're in dryrun mode, then we don't need the
            # real multipart tasks.  We can just use a BasicTask
            # in the else clause below, which will print out the
            # fact that it's transferring a file rather than
            # the specific part tasks required to perform the
            # transfer.
            num_uploads = self._enqueue_multipart_tasks(filename)
        else:
            task = tasks.BasicTask(
                session=self.session,
                filename=filename,
                parameters=self.params,
                result_queue=self.result_queue)
            self.executor.submit(task)
        total_files += 1
        total_parts += num_uploads
    return total_files, total_parts
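
# Illustration only: a minimal sketch of an ``is_glacier_compatible``-style
# check like the one guarding the loop above.  The rule encoded here (only
# restored GLACIER objects can be downloaded or copied, while uploads and
# deletes always proceed) is an assumption of the sketch, not the library's
# exact logic, and the ``restored`` flag is a hypothetical parameter.
def is_glacier_compatible_sketch(operation_name, storage_class, restored=False):
    """Return True when the operation can proceed for this storage class."""
    if storage_class != 'GLACIER':
        return True
    if operation_name in ('upload', 'delete'):
        return True
    return restored


# Example: downloading an unrestored GLACIER object is skipped with a
# warning, matching the ``continue`` branch above.
assert not is_glacier_compatible_sketch('download', 'GLACIER')
assert is_glacier_compatible_sketch('download', 'GLACIER', restored=True)
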