def test_update_upload_progress__progress_is_non_numerical(self, write_mock):
    io.update_upload_progress('a')
    write_mock.assert_called_once_with(
        '\rUploading: [--------------------------------------------------] 0% error: progress var must be float\r\n'
    )
def multithreaded_upload(bucket, key, file_path):
    """
    Upload a file in multiple parts using multiple threads.
    Takes advantage of S3's multipart upload.
    :param bucket: S3 bucket name
    :param key: key name of the file to be uploaded
    :param file_path: full path of the file to be uploaded
    :return: Result dictionary
    """
    size = os.path.getsize(file_path)
    total_parts = math.ceil(size / CHUNK_SIZE)  # Number of parts needed
    LOG.debug('Doing multi-threaded upload. Parts Needed=' + str(total_parts))

    upload_id = _get_multipart_upload_id(bucket, key)
    io.update_upload_progress(0)

    # Upload parts
    try:
        etaglist = []  # list of part numbers and their ETags

        with open(file_path, 'rb') as f:
            # Create threads to handle parts of upload
            lock = threading.Lock()
            jobs = []
            for i in range(THREAD_COUNT):
                p = threading.Thread(
                    target=_upload_chunk,
                    args=(f, lock, etaglist, total_parts, bucket, key, upload_id),
                )
                p.daemon = True
                jobs.append(p)
                p.start()

            _wait_for_threads(jobs)

        # S3 requires the etag list to be sorted
        etaglist = sorted(etaglist, key=lambda k: k['PartNumber'])

        if not _all_parts_were_uploaded(etaglist, total_parts):
            LOG.debug(
                'Uploaded {0} parts, but should have uploaded {1} parts.'.format(
                    len(etaglist), total_parts))
            raise UploadError(
                'An error occurred while uploading Application Version. '
                'Use the --debug option for more information if the problem persists.'
            )

        result = _make_api_call(
            'complete_multipart_upload',
            Bucket=bucket,
            Key=key,
            UploadId=upload_id,
            MultipartUpload=dict(Parts=etaglist))

        return result

    except (Exception, KeyboardInterrupt):
        # We don't want to clean up the multipart upload in case the user
        # decides to continue later
        raise
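# NOTE: The _wait_for_threads helper called above is not shown in this
# section. The following is a minimal sketch of what it might look like,
# assuming it only needs to block until every worker thread finishes; the
# real helper may poll, time out, or report progress instead. This body is
# an illustration under that assumption, not the module's actual code.
def _wait_for_threads(jobs):
    for job in jobs:
        # Block until each upload thread has drained its share of parts
        job.join()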
def test_update_upload_progress__progress_is_between_zero_and_one(self, write_mock):
    io.update_upload_progress(0.50)
    write_mock.assert_called_once_with(
        '\rUploading: [#########################-------------------------] 50% '
    )
def test_update_upload_progress__progress_is_complete(self, write_mock):
    io.update_upload_progress(1)
    write_mock.assert_called_once_with(
        '\rUploading: [##################################################] 100% Done...\r\n'
    )
def test_update_upload_progress__progress_is_zero(self, write_mock):
    io.update_upload_progress(0)
    write_mock.assert_called_once_with(
        '\rUploading: [--------------------------------------------------] 0% '
    )
def test_update_upload_progress__progress_is_negative(self, write_mock):
    io.update_upload_progress(-1)
    write_mock.assert_called_once_with(
        '\rUploading: [--------------------------------------------------] 0% Halt...\r\n'
    )
def _upload_chunk(f, lock, etaglist, total_parts, bucket, key, upload_id):
    LOG.debug('Creating child thread')
    while True:
        data, part = _read_next_section_from_file(f, lock)
        if not data:
            LOG.debug('No data left, closing')
            return

        # First check to see if S3 already has the part
        for i in range(0, 5):
            try:
                etag = _get_part_etag(bucket, key, part, upload_id)
                if etag is None:
                    b = BytesIO()
                    b.write(data)
                    b.seek(0)
                    response = _make_api_call(
                        'upload_part',
                        Bucket=bucket,
                        Key=key,
                        UploadId=upload_id,
                        Body=b,
                        PartNumber=part)

                    etag = response['ETag']

                etaglist.append({'PartNumber': part, 'ETag': etag})
                progress = (1 / total_parts) * len(etaglist)
                io.update_upload_progress(progress)

                # No errors; break out of the retry loop
                break
            except EndOfTestError:
                return
            except Exception as e:
                # Swallow exceptions so they are not printed as a stack
                # trace to the console. They are typically connection
                # resets and similar transient failures.
                LOG.debug('Exception raised: ' + str(e))
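# NOTE: _read_next_section_from_file is also not shown here. The sketch
# below assumes it serializes reads with the shared lock and derives the
# 1-based part number from the current file offset; the real helper may
# track the part counter differently. It is an illustration under those
# assumptions, not the module's actual code.
def _read_next_section_from_file(f, lock):
    with lock:
        # Read the next chunk while holding the lock so threads never
        # interleave reads from the shared file handle
        data = f.read(CHUNK_SIZE)
        part = f.tell() // CHUNK_SIZE
        if f.tell() % CHUNK_SIZE:
            part += 1
        return data, part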