def upload_file(bucket, source, destination, s3_ssenc, bufsize):
    """Upload ``source`` to key ``destination`` in ``bucket`` via S3 multipart upload.

    The file is compressed and chunked by ``compressed_pipe`` and each chunk is
    uploaded as one part.  Part uploads are retried up to ``MAX_RETRY_COUNT``
    times, sleeping ``SLEEP_TIME`` seconds between attempts.

    :param bucket: boto S3 bucket object to upload into.
    :param source: local path of the file to upload.
    :param destination: S3 key name for the uploaded object.
    :param s3_ssenc: whether to request server-side encryption.
    :param bufsize: chunk size passed to ``compressed_pipe``.
    :returns: ``True`` on success, ``False`` on failure.
    """
    mp = None
    retry_count = 0
    while True:
        # Initiate the multipart upload only once; reusing it across retries
        # avoids leaking abandoned multipart uploads (which S3 keeps, and
        # bills for, until explicitly aborted).
        if mp is None:
            try:
                mp = bucket.initiate_multipart_upload(destination, encrypt_key=s3_ssenc)
                logger.info("Initialized multipart upload for file {!s} to {!s}".format(source, destination))
            except Exception as exc:
                logger.error("Error while initializing multipart upload for file {!s} to {!s}".format(source, destination))
                # str(exc) instead of exc.message: the latter is Python-2-only.
                logger.error(str(exc))
                return False
        try:
            # Part numbers are 1-based in the S3 multipart API.  Re-uploading
            # a part number on retry simply replaces the earlier attempt.
            for i, chunk in enumerate(compressed_pipe(source, bufsize)):
                mp.upload_part_from_file(chunk, i + 1, cb=s3_progress_update_callback)
        except Exception as exc:
            logger.error("Error uploading file {!s} to {!s}. Retry count: {}".format(source, destination, retry_count))
            logger.error(str(exc))
            if retry_count >= MAX_RETRY_COUNT:
                logger.error("Retried too many times uploading file {!s}".format(source))
                cancel_upload(bucket, mp, destination)
                return False
            time.sleep(SLEEP_TIME)
            retry_count = retry_count + 1
        else:
            try:
                mp.complete_upload()
            except Exception as exc:
                logger.error("Error completing multipart upload for file {!s} to {!s}".format(source, destination))
                logger.error(str(exc))
                logger.error(mp.to_xml())
                cancel_upload(bucket, mp, destination)
                return False
            else:
                return True
def upload_file(bucket, source, destination, s3_ssenc, bufsize, reduced_redundancy, rate_limit):
    """Upload ``source`` to key ``destination`` in ``bucket`` via S3 multipart upload.

    The multipart upload is initiated once and reused across retries; on final
    failure it is aborted via ``cancel_upload``.  Retries use exponential
    backoff starting at ``SLEEP_TIME`` and multiplying by ``SLEEP_MULTIPLIER``.

    :param bucket: boto S3 bucket object to upload into.
    :param source: local path of the file to upload.
    :param destination: S3 key name for the uploaded object.
    :param s3_ssenc: whether to request server-side encryption.
    :param bufsize: chunk size passed to ``compressed_pipe``.
    :param reduced_redundancy: whether to use the reduced-redundancy storage class.
    :param rate_limit: throughput limit passed to ``compressed_pipe``.
    :returns: ``source`` on success, ``None`` after exhausting retries.
    """
    mp = None
    retry_count = 0
    sleep_time = SLEEP_TIME
    while True:
        try:
            if mp is None:
                # Initiate the multi-part upload.
                try:
                    mp = bucket.initiate_multipart_upload(
                        destination,
                        encrypt_key=s3_ssenc,
                        reduced_redundancy=reduced_redundancy
                    )
                    logger.info("Initialized multipart upload for file {!s} to {!s}".format(source, destination))
                except Exception as exc:
                    logger.error(
                        "Error while initializing multipart upload for file {!s} to {!s}".format(source, destination)
                    )
                    # str(exc) instead of exc.message: the latter is Python-2-only.
                    logger.error(str(exc))
                    raise
            try:
                for i, chunk in enumerate(compressed_pipe(source, bufsize, rate_limit)):
                    mp.upload_part_from_file(chunk, i + 1, cb=s3_progress_update_callback)
            except Exception as exc:
                logger.error("Error uploading file {!s} to {!s}".format(source, destination))
                logger.error(str(exc))
                raise
            try:
                mp.complete_upload()
            except Exception as exc:
                logger.error("Error completing multipart file upload for file {!s} to {!s}".format(source, destination))
                logger.error(str(exc))
                # The multi-part object may be in a bad state.  Extract an
                # error message if we can, then discard it.
                try:
                    logger.error(mp.to_xml())
                except Exception:
                    pass
                cancel_upload(bucket, mp, destination)
                mp = None
                raise
            # Successful upload, return the uploaded file.
            return source
        except Exception:
            # Failure anywhere reaches here.
            retry_count = retry_count + 1
            if retry_count > MAX_RETRY_COUNT:
                logger.error("Retried too many times uploading file {!s}".format(source))
                # Abort the multi-part upload if it was ever initiated.
                if mp is not None:
                    cancel_upload(bucket, mp, destination)
                return None
            else:
                logger.info("Sleeping before retry")
                time.sleep(sleep_time)
                sleep_time = sleep_time * SLEEP_MULTIPLIER
                logger.info("Retrying {}/{}".format(retry_count, MAX_RETRY_COUNT))
def upload_file(bucket, source, destination, s3_ssenc, bufsize, reduced_redundancy, rate_limit):
    """Multipart-upload ``source`` to ``destination`` in ``bucket``.

    A single multipart upload is initiated lazily and reused across retry
    attempts; it is aborted on terminal failure.  Between attempts the sleep
    interval grows geometrically (``SLEEP_TIME`` * ``SLEEP_MULTIPLIER``^k).

    :param bucket: boto S3 bucket object.
    :param source: local file path to upload.
    :param destination: target S3 key.
    :param s3_ssenc: request server-side encryption if true.
    :param bufsize: chunk size for ``compressed_pipe``.
    :param reduced_redundancy: use reduced-redundancy storage if true.
    :param rate_limit: throughput cap for ``compressed_pipe``.
    :returns: ``source`` on success, ``None`` after exhausting retries.
    """
    mp = None
    retry_count = 0
    sleep_time = SLEEP_TIME
    while True:
        try:
            if mp is None:
                # Initiate the multi-part upload (only once per call).
                try:
                    mp = bucket.initiate_multipart_upload(
                        destination, encrypt_key=s3_ssenc, reduced_redundancy=reduced_redundancy)
                    logger.info("Initialized multipart upload for file {!s} to {!s}".format(source, destination))
                except Exception as exc:
                    logger.error("Error while initializing multipart upload for file {!s} to {!s}".format(source, destination))
                    # exc.message is Python-2-only; str(exc) is portable.
                    logger.error(str(exc))
                    raise
            try:
                for i, chunk in enumerate(compressed_pipe(source, bufsize, rate_limit)):
                    mp.upload_part_from_file(chunk, i + 1, cb=s3_progress_update_callback)
            except Exception as exc:
                logger.error("Error uploading file {!s} to {!s}".format(source, destination))
                logger.error(str(exc))
                raise
            try:
                mp.complete_upload()
            except Exception as exc:
                logger.error("Error completing multipart file upload for file {!s} to {!s}".format(source, destination))
                logger.error(str(exc))
                # The multi-part object may be in a bad state.  Extract an
                # error message if we can, then discard it.
                try:
                    logger.error(mp.to_xml())
                except Exception:
                    pass
                cancel_upload(bucket, mp, destination)
                mp = None
                raise
            # Successful upload, return the uploaded file.
            return source
        except Exception:
            # Failure anywhere in the attempt lands here.
            retry_count = retry_count + 1
            if retry_count > MAX_RETRY_COUNT:
                logger.error("Retried too many times uploading file {!s}".format(source))
                # Abort the multi-part upload if it was ever initiated.
                if mp is not None:
                    cancel_upload(bucket, mp, destination)
                return None
            else:
                logger.info("Sleeping before retry")
                time.sleep(sleep_time)
                sleep_time = sleep_time * SLEEP_MULTIPLIER
                logger.info("Retrying {}/{}".format(retry_count, MAX_RETRY_COUNT))
def upload(self, mpu_id, bufsize, rate_limit, quiet):
    """Upload the file at ``self.path`` as parts of multipart upload ``mpu_id``.

    Each chunk produced by ``compressed_pipe`` is sent with
    ``s3.upload_part`` and its ETag recorded, so the caller can complete
    the multipart upload.

    :param mpu_id: the UploadId returned by ``create_multipart_upload``.
    :param bufsize: chunk size passed to ``compressed_pipe``.
    :param rate_limit: throughput limit passed to ``compressed_pipe``.
    :param quiet: suppress progress output in ``compressed_pipe``.
    :returns: list of ``{"PartNumber": int, "ETag": str}`` dicts, in order.
    """
    parts = []
    # Part numbers are 1-based per the S3 multipart API, hence start=1.
    for part_number, chunk in enumerate(
            compressed_pipe(self.path, bufsize, rate_limit, quiet), start=1):
        part = self.s3.upload_part(
            Body=chunk,
            Bucket=self.bucket,
            Key=self.key,
            UploadId=mpu_id,
            PartNumber=part_number,
        )
        parts.append({"PartNumber": part_number, "ETag": part["ETag"]})
    return parts