def obj_stream(bucket_name, src, dest_name, filename):
    bucket = Obj.conn.get_bucket(bucket_name)
    policy = bucket.get_acl()
    if dest_name:
        current_path = ''
        for dir_part in dest_name.lstrip(os.sep).split(os.sep):
            current_path = current_path + dir_part + '/'
            create_directory(bucket, current_path)
        key_name = current_path + filename
    else:
        key_name = filename
    # always multipart since total size of stream is unknown
    logging.info("Starting a multipart upload using stream.")
    m = MultiPartStream()
    m.start_upload(bucket_name, key_name, src, policy)
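
# Hypothetical usage sketch (not part of umobj itself): stream data from
# stdin into a key under a destination prefix.  It assumes Obj.conn has
# already been connected to the object store and that bucket 'my-bucket'
# exists.
#
#     import sys
#     obj_stream('my-bucket', sys.stdin, 'backups/nightly', 'dump.sql')
#
# This creates the 'backups/' and 'backups/nightly/' directory keys if they
# are missing and uploads the stream as 'backups/nightly/dump.sql'.
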
def obj_upload(bucket_name, src, dest_name, recursive=False, multi=False,
               checksum=False):
    """Upload a local file, or a directory tree when recursive is set,
    into bucket_name, optionally under dest_name.  Multipart uploads are
    used when multi is set or a file exceeds 1 GiB; with checksum set,
    check_key_upload() is consulted before re-uploading a key."""
    # translate to the full absolute path
    src = os.path.abspath(src)
    # retrieve the bucket
    bucket = Obj.conn.get_bucket(bucket_name)
    try:
        size = os.stat(src).st_size
    except OSError as e:
        logging.error(e)
        return
    policy = bucket.get_acl()

    # use a closure to capture the current bucket
    def cancel_multipart_handler(signal, frame):
        for upload in bucket.get_all_multipart_uploads():
            upload.cancel_upload()
            logging.info("Removed incomplete multipart upload: %s" %
                         str(upload))
    umobj_add_handler(signal.SIGINT, cancel_multipart_handler)

    if recursive and os.path.isdir(src):
        prefix = src.split(os.sep)[-1]
        if dest_name:
            prefix = dest_name.rstrip(os.sep) + '/' + prefix
        for directory in walk_path(prefix):
            directory = directory + '/'
            if not check_directory(bucket, directory):
                create_directory(bucket, directory)
        operations = sum([len(files) for r, d, files in
                          os.walk(src.rstrip(os.sep))])
        pbar = progressbar.ProgressBar(maxval=operations)
        pbar.start()
        count = 0
        for root, dirs, files in os.walk(src.rstrip(os.sep)):
            # we will not create the base directory
            if root != src:
                directory = '%s/%s/' % (prefix,
                                        lremove(src, root).lstrip(os.sep))
                if not check_directory(bucket, directory):
                    create_directory(bucket, directory)
                count += 1
                if count < operations:
                    pbar.update(count)
            for f in files:
                filename = root + os.sep + f
                try:
                    file_st = os.stat(filename)
                    size = file_st.st_size
                except OSError as e:
                    logging.error(e)
                    continue
                if root != src:
                    keyname = '%s/%s/%s' % (prefix,
                                            lremove(src, root).lstrip(os.sep),
                                            f)
                else:
                    keyname = '%s/%s' % (prefix, f)
                if checksum and not check_key_upload(bucket, keyname,
                                                     filename):
                    continue
                logging.info("Upload key %s from file %s" %
                             (keyname, filename))
                if (multi and size > 0) or (size > (1024 * 1024 * 1024)):
                    m = MultiPart()
                    m.start_upload(bucket_name, keyname, filename, policy)
                else:
                    key = bucket.new_key(keyname)
                    res = upload_file(key, filename, progress=False)
                    if res >= 0:
                        logging.debug("Applying bucket policy %s" % policy)
                        ## set the owner of the policy to the upload user
                        policy.owner = key.get_acl().owner
                        key.set_acl(policy)
                count += 1
                if count < operations:
                    pbar.update(count)
        pbar.finish()
    else:
        if os.path.isdir(src):
            logging.warning("Skipping directory %s, " % src +
                            "use the recursive option.")
            return
        if not os.path.isfile(src):
            logging.error("File %s does not exist." % src)
            return
        if dest_name:
            current_path = ''
            for dir_part in dest_name.lstrip(os.sep).split(os.sep)[:-1]:
                current_path = current_path + dir_part + '/'
                create_directory(bucket, current_path)
            if dest_name.endswith('/'):
                key_name = current_path + os.path.basename(src)
            else:
                key_name = dest_name
        else:
            key_name = os.path.basename(src)
        if checksum and not check_key_upload(bucket, key_name, src):
            return
        if multi or (size > (1024 * 1024 * 1024)):
            logging.info("Starting a multipart upload.")
            m = MultiPart()
            m.start_upload(bucket_name, key_name, src, policy)
        else:
            key = bucket.new_key(key_name)
            res = upload_file(key, src)
            if res >= 0:
                logging.debug("Applying bucket policy %s" % policy)
                ## set the owner of the policy to the upload user
                policy.owner = key.get_acl().owner
                key.set_acl(policy)
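
# Hypothetical usage sketch (illustrative only; assumes Obj.conn is already
# connected and bucket 'my-bucket' exists):
#
#     # upload a single file under its basename as the key
#     obj_upload('my-bucket', '/tmp/report.pdf', None)
#
#     # mirror /data/project under the 'archive/project/' prefix, forcing
#     # multipart uploads and skipping keys that check_key_upload() reports
#     # as already uploaded
#     obj_upload('my-bucket', '/data/project', 'archive',
#                recursive=True, multi=True, checksum=True)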