def test_upload_part(self):
    key = 'requestpayment-test-upload-part-object'
    filename = key + '.txt'
    content = random_bytes(1024 * 1024)

    headers = dict()
    headers[OSS_REQUEST_PAYER] = "requester"

    with open(filename, 'wb') as f:
        f.write(content)

    total_size = os.path.getsize(filename)

    # Set part size
    part_size = determine_part_size(total_size, preferred_size=(100*1024))

    # Init multipart without payer setting, should fail.
    self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.init_multipart_upload, key)

    # Init multipart with payer setting, should succeed.
    upload_id = self.payer_bucket.init_multipart_upload(key, headers=headers).upload_id
    parts = []

    # Upload part without payer setting, should fail.
    with open(filename, 'rb') as fileobj:
        part_number = 1
        num_to_upload = part_size
        self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.upload_part, key, upload_id,
                          part_number, SizedFileAdapter(fileobj, num_to_upload))

    # Upload parts with payer setting, should succeed.
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = self.payer_bucket.upload_part(key, upload_id, part_number,
                                                   SizedFileAdapter(fileobj, num_to_upload),
                                                   headers=headers)
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    # Complete multipart upload without payer setting, should fail.
    self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.complete_multipart_upload,
                      key, upload_id, parts)

    # Complete multipart upload with payer setting, should succeed.
    result = self.payer_bucket.complete_multipart_upload(key, upload_id, parts, headers=headers)
    self.assertEqual(result.status, 200)

    self.bucket.delete_object(key)
    os.remove(filename)

def upload(self, upload_path, filepath):
    """
    upload_path: full destination path of the object, including the object name itself
    filepath: path to the local file
    """
    key = upload_path
    filename = filepath
    total_size = os.path.getsize(filename)
    # determine_part_size picks a suitable part size.
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

    # Initialize the multipart upload.
    upload_id = self.bucket.init_multipart_upload(key).upload_id
    parts = []

    # Upload the parts one by one.
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            # SizedFileAdapter(fileobj, size) wraps fileobj in a new file-like object
            # that reads at most `size` bytes from the current position.
            result = self.bucket.upload_part(
                key, upload_id, part_number,
                SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    # Complete the multipart upload.
    self.bucket.complete_multipart_upload(key, upload_id, parts)

def uploadFile(self):
    filelist = self.file_name()
    if filelist:
        try:
            for file in filelist:
                fileDir = file.split(self.dirname)[1]
                key = (self.dirname + fileDir).replace('\\', '/')
                total_size = os.path.getsize(file)
                # determine_part_size picks a suitable part size.
                part_size = determine_part_size(total_size, preferred_size=100 * 1024)
                # Initialize the multipart upload.
                upload_id = self.bucket.init_multipart_upload(key).upload_id
                parts = []
                # Upload the parts one by one.
                with open(file, 'rb') as fileobj:
                    part_number = 1
                    offset = 0
                    while offset < total_size:
                        num_to_upload = min(part_size, total_size - offset)
                        # SizedFileAdapter(fileobj, size) yields a file-like object
                        # limited to `size` bytes from the current position.
                        result = self.bucket.upload_part(key, upload_id, part_number,
                                                         SizedFileAdapter(fileobj, num_to_upload))
                        parts.append(PartInfo(part_number, result.etag))
                        offset += num_to_upload
                        part_number += 1
                # Complete the multipart upload.
                self.bucket.complete_multipart_upload(key, upload_id, parts)
            logging.info('upload file to yun ok')
            self.sendMail()
        except Exception as e:
            logging.info('upload file to yun error')
            logging.error(e)

def large_file_to_oss(id, file, type_name='courses'):
    try:
        file_name = ''
        if file:
            _, file_ext = os.path.splitext(file.name)
            md = hashlib.md5()
            md.update((str(time.time()) + file.name).encode('utf8'))
            file_name = type_name + '/' + str(id) + '/' + md.hexdigest() + file_ext
            oss_file_name = os.path.join(settings.ALIYUN_OSS_DIRECTORY_PREFIX, file_name)
            bucket = aliyun_oss()
            key = oss_file_name
            total_size = file.size
            part_size = determine_part_size(total_size, preferred_size=100 * 1024)

            # Initialize the multipart upload.
            upload_id = bucket.init_multipart_upload(key).upload_id
            parts = []

            # Upload the parts one by one.
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                result = bucket.upload_part(key, upload_id, part_number,
                                            SizedFileAdapter(file, num_to_upload))
                parts.append(PartInfo(part_number, result.etag))
                offset += num_to_upload
                part_number += 1

            # Complete the multipart upload.
            bucket.complete_multipart_upload(key, upload_id, parts)

            # # Verify:
            # # assert bucket.get_object(key).read() == file.read()
        return file_name
    except Exception:
        logger.error("Exceptions: {}".format(sys.exc_info()[0]))

def OssSliceUpload():
    accessKeyId = sys.argv[1]
    accessSecret = sys.argv[2]
    EndPoint = sys.argv[3]
    Bucket = sys.argv[4]
    RemoteFile = sys.argv[5]
    LocalFile = sys.argv[6]

    auth = oss2.Auth(accessKeyId, accessSecret)
    bucket = oss2.Bucket(auth, EndPoint, Bucket)

    key = RemoteFile
    filename = LocalFile
    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

    # Initialize the multipart upload.
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []

    # Upload the parts one by one.
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    # Complete the multipart upload.
    bucket.complete_multipart_upload(key, upload_id, parts)

    # Verify the upload.
    with open(filename, 'rb') as fileobj:
        assert bucket.get_object(key).read() == fileobj.read()

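# Usage sketch for the script above (the module name oss_slice_upload.py is an
# assumption; OssSliceUpload() reads its six positional arguments straight from sys.argv):
#
#   python oss_slice_upload.py <AccessKeyId> <AccessKeySecret> <Endpoint> <Bucket> <RemoteFile> <LocalFile>
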
def upload(self):
    psize = oss2.determine_part_size(self.__totalSize, preferred_size=self.__partSize)

    # Initialize the multipart upload.
    self.__uploadId = self.__bucket.init_multipart_upload(self.__key).upload_id

    # Upload the parts one by one.
    with open(AliyunVodUtils.toUnicode(self.__fileName), 'rb') as fileObj:
        partNumber = 1
        offset = 0
        while offset < self.__totalSize:
            uploadSize = min(psize, self.__totalSize - offset)
            logger.info(
                "UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s" %
                (self.__fileName, self.__videoId, self.__uploadId, partNumber, uploadSize))
            result = self.__bucket.upload_part(
                self.__key, self.__uploadId, partNumber,
                SizedFileAdapter(fileObj, uploadSize))
            self.__finishedParts.append(PartInfo(partNumber, result.etag))
            offset += uploadSize
            partNumber += 1

    # Complete the multipart upload.
    self.__bucket.complete_multipart_upload(self.__key, self.__uploadId,
                                            self.__finishedParts, headers=self.__headers)
    return result

def upload_multiple_object(self, bucket_name, obj_name, obj_file):
    try:
        bucket = oss2.Bucket(auth, url, bucket_name)
        total_size = os.path.getsize(obj_file)
        # Use determine_part_size to determine the part size.
        part_size = determine_part_size(total_size, preferred_size=100 * 1024)

        # Initialize a multipart upload event.
        upload_id = bucket.init_multipart_upload(obj_name).upload_id
        parts = []

        # Upload parts one by one.
        with open(obj_file, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                # The SizedFileAdapter(fileobj, size) method generates a new object,
                # and re-calculates the initial append location.
                result = bucket.upload_part(
                    obj_name, upload_id, part_number,
                    SizedFileAdapter(fileobj, num_to_upload))
                parts.append(PartInfo(part_number, result.etag))
                offset += num_to_upload
                part_number += 1

        # Complete multipart upload.
        bucket.complete_multipart_upload(obj_name, upload_id, parts)

        # Verify the multipart upload.
        with open(obj_file, 'rb') as fileobj:
            assert bucket.get_object(obj_name).read() == fileobj.read()

        status = "success"
    except Exception as e:
        status = str(e)
    return status

def oss_upload(osskey, filename, uploadpath, de_suffix=True):
    try:
        # When de_suffix is True, use a regex to strip the file extension.
        if de_suffix is True:
            osskey = re.match(r'\w+', filename).group()
        # An Alibaba Cloud primary-account AccessKey has full API access and is high risk.
        # It is strongly recommended to create and use a RAM account for API access and
        # day-to-day operations; log in at https://ram.console.aliyun.com to create one.
        auth = oss2.Auth(ACCESSKEYID, ACCESSKEYSECRET)
        # The endpoint here uses Hangzhou as an example; fill in your actual region.
        bucket = oss2.Bucket(auth, ENDPOINT, BUCKETNAME)
        key = uploadpath + osskey
        total_size = os.path.getsize(filename)
        # determine_part_size picks a suitable part size.
        part_size = determine_part_size(total_size, preferred_size=10240 * 1000)

        # Initialize the multipart upload.
        upload_id = bucket.init_multipart_upload(key).upload_id
        parts = []

        # Upload the parts one by one.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                # SizedFileAdapter(fileobj, size) yields a file-like object limited
                # to `size` bytes from the current position.
                result = bucket.upload_part(
                    key, upload_id, part_number,
                    SizedFileAdapter(fileobj, num_to_upload))
                parts.append(PartInfo(part_number, result.etag))
                offset += num_to_upload
                part_number += 1

        # Complete the multipart upload.
        bucket.complete_multipart_upload(key, upload_id, parts)
    except Exception as f:
        Print.error(f)

def test_upload_part(self):
    key = 'traffic-limit-test-resumable-upload-object'

    # Create a 2MB temporary file.
    file_name = self._prepare_temp_file_with_size(OBJECT_SIZE_1MB * 2)
    total_size = os.path.getsize(file_name)

    # Set the part size to 1MB.
    part_size = determine_part_size(total_size, preferred_size=(1024 * 1024))

    # Init multipart upload.
    upload_id = self.bucket.init_multipart_upload(key).upload_id
    parts = []

    headers = dict()
    headers[OSS_TRAFFIC_LIMIT] = str(LIMIT_100KB)

    # Upload parts.
    with open(file_name, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)

            # Upload the part with the traffic limit setting.
            start_time_sec = int(time.time())
            result = self.bucket.upload_part(key, upload_id, part_number,
                                             SizedFileAdapter(fileobj, num_to_upload),
                                             headers=headers)
            end_time_sec = int(time.time())

            # Calculate the elapsed time.
            expense_time_sec = end_time_sec - start_time_sec
            # The theoretical time is 1MB/100KB = 10s; allow a minimum of 10*0.7s.
            theoretical_expense_min = 10 * 0.7
            # Compare against the minimum theoretical time.
            self.assertTrue(expense_time_sec > theoretical_expense_min)

            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    result = self.bucket.complete_multipart_upload(key, upload_id, parts)
    self.assertEqual(result.status, 200)

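# For reference, the same x-oss-traffic-limit header also throttles simple uploads. A
# minimal sketch assuming an existing `bucket`; the limit is expressed in bit/s, so
# 100 KB/s is 819200 (presumably the value of LIMIT_100KB in the test above):
from oss2.headers import OSS_TRAFFIC_LIMIT

headers = {OSS_TRAFFIC_LIMIT: str(819200)}
bucket.put_object('throttled-object', b'payload', headers=headers)
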
def multipartUpload(self):
    psize = oss2.determine_part_size(self.__totalSize, preferred_size=self.__partSize)

    # Initialize the multipart upload.
    self.__uploadId = self.__bucket.init_multipart_upload(self.__object).upload_id

    startTime = time.time()
    expireSeconds = 2500  # The upload credential is valid for 3000 seconds; refresh ahead of expiry.

    # Upload the parts one by one.
    with open(AliyunVodUtils.toUnicode(self.__filePath), 'rb') as fileObj:
        partNumber = 1
        offset = 0
        while offset < self.__totalSize:
            uploadSize = min(psize, self.__totalSize - offset)
            # logger.info("UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s" %
            #             (self.__fileName, self.__videoId, self.__uploadId, partNumber, uploadSize))
            result = self.__bucket.upload_part(
                self.__object, self.__uploadId, partNumber,
                SizedFileAdapter(fileObj, uploadSize))
            # print(result.request_id)
            self.__finishedParts.append(PartInfo(partNumber, result.etag))
            offset += uploadSize
            partNumber += 1

            # Upload progress callback.
            self.__progressCallback(offset, self.__totalSize)
            if self.__uploadInfo['MediaType'] == 'video':
                # Report the upload progress.
                self.__reportUploadProgress('multipart', partNumber - 1, offset)

            # Check whether the upload credential has expired.
            nowTime = time.time()
            if nowTime - startTime >= expireSeconds:
                self.__bucket = self.__refreshAuthCallback(self.__uploadInfo['MediaId'])
                startTime = nowTime

    # Complete the multipart upload.
    self.__bucket.complete_multipart_upload(self.__object, self.__uploadId,
                                            self.__finishedParts, headers=self.__headers)
    return result

def upload(oss_task_zip, zip_task_file, endpoint, bucket_name):
    dlog.debug(f"debug: upload: oss_task_zip:{oss_task_zip}; zip_task_file:{zip_task_file}")
    bucket = _get_oss_bucket(endpoint, bucket_name)
    total_size = os.path.getsize(zip_task_file)
    part_size = determine_part_size(total_size, preferred_size=1000 * 1024)
    upload_id = bucket.init_multipart_upload(oss_task_zip).upload_id
    parts = []
    with open(zip_task_file, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(oss_task_zip, upload_id, part_number,
                                        SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1
    result = bucket.complete_multipart_upload(oss_task_zip, upload_id, parts)
    return result

def push_by_piece(self, path, name):
    path = Path(path)
    total_size = path.stat().st_size
    part_size = determine_part_size(total_size, preferred_size=1024 * 1024)
    upload_id = self.bucket.init_multipart_upload(name).upload_id
    parts = []
    with open(path, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            # SizedFileAdapter(fileobj, size) yields a file-like object limited
            # to `size` bytes from the current position.
            result = self.bucket.upload_part(name, upload_id, part_number,
                                             SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1
    headers = {'Content-MD5': self.get_md5(path)}
    self.bucket.complete_multipart_upload(name, upload_id, parts, headers=headers)

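# The snippet above calls self.get_md5, which is not shown. A minimal sketch of such a
# helper, assuming the Content-MD5 header carries the base64-encoded MD5 digest of the
# data (RFC 1864); the name and signature are inferred from the call site:
import base64
import hashlib

def get_md5(path, block_size=64 * 1024):
    """Return the base64-encoded MD5 digest of a file for a Content-MD5 header."""
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return base64.b64encode(md5.digest()).decode('ascii')
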
def main(args):
    filename = args.upload_file
    key = os.path.split(filename)[1]
    for param in (access_key_id, access_key_secret, bucket_name, endpoint):
        assert '<' not in param, 'Please set the parameter: ' + param

    # Create a Bucket object; all object-related operations go through it.
    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100000 * 1024)

    # Initialize the multipart upload.
    print('OSS initializing:')
    encode_md5 = calculate_file_md5(filename)
    print("%s's md5 is %s" % (filename, encode_md5))
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []

    # Upload the parts one by one.
    print('Separate the file into parts and upload:')
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(key, upload_id, part_number,
                                        SizedFileAdapter(fileobj, num_to_upload),
                                        progress_callback=percentage)
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    # Complete the multipart upload.
    print('Complete the uploading task:')
    result = bucket.complete_multipart_upload(key, upload_id, parts)
    print('upload result status: %i' % result.status)
    print('upload complete with the file %s' % key)

def upload(filename, key):
    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

    END_POINT = 'oss-cn-shanghai.aliyuncs.com'
    AUTH = Auth('LTAIStvC4wpBWRVG', 'BNXtvOz82JjzlSLjPBdQJyEUpXi4PD')
    BUCKET_NAME = "yunbeifeng"
    bucket = Bucket(AUTH, END_POINT, bucket_name=BUCKET_NAME)

    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []

    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    bucket.complete_multipart_upload(key, upload_id, parts)

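# For comparison, the oss2 SDK also provides a higher-level helper, oss2.resumable_upload,
# which performs the part splitting, local checkpointing, and resume logic itself. A
# minimal sketch assuming an already-constructed `bucket` like the ones above:
import oss2

def upload_resumable(bucket, key, filename):
    # Files larger than multipart_threshold are uploaded in parts; progress is
    # checkpointed locally so an interrupted upload can resume where it left off.
    oss2.resumable_upload(bucket, key, filename,
                          multipart_threshold=100 * 1024,
                          part_size=100 * 1024,
                          num_threads=4)
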
result = bucket.init_multipart_upload(key, headers=headers)

sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
print('sse_algorithm:', sse_algo)
print('data_algorithm:', data_algo)
print('kms_key_id:', kms_key_id)

upload_id = result.upload_id
parts = []

# Upload the parts one by one.
with open(filename, 'rb') as fileobj:
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        # SizedFileAdapter(fileobj, size) yields a file-like object limited
        # to `size` bytes from the current position.
        result = bucket.upload_part(key, upload_id, part_number,
                                    SizedFileAdapter(fileobj, num_to_upload))
        parts.append(PartInfo(part_number, result.etag))
        offset += num_to_upload
        part_number += 1

# Complete the multipart upload.
bucket.complete_multipart_upload(key, upload_id, parts)

# Verify the multipart upload.
with open(filename, 'rb') as fileobj:
    assert bucket.get_object(key).read() == fileobj.read()

def test_multipart_upload(self):
    auth = oss2.Auth(OSS_ID, OSS_SECRET)
    bucket_name = OSS_BUCKET + "-test-multipart-upload-data-encryption"
    bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
    bucket.create_bucket()

    key = 'data-encryption-test-upload-part-object'
    filename = self._prepare_temp_file_with_size(1024 * 1024)

    headers = dict()
    headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
    headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4

    total_size = os.path.getsize(filename)
    # Set part size
    part_size = determine_part_size(total_size, preferred_size=(100*1024))

    # Init multipart with encryption headers.
    result = bucket.init_multipart_upload(key, headers=headers)
    ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
    ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
    ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
    self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
    self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
    self.assertIsNotNone(ret_kms_key_id)
    kms_key_id = ret_kms_key_id

    upload_id = result.upload_id
    parts = []

    # Uploading a part with the encryption headers should fail.
    headers = dict()
    headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
    headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
    with open(filename, 'rb') as fileobj:
        part_number = 1
        num_to_upload = part_size
        self.assertRaises(oss2.exceptions.InvalidArgument, bucket.upload_part, key, upload_id,
                          part_number, SizedFileAdapter(fileobj, num_to_upload), headers=headers)

    # Upload parts without encryption headers.
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(key, upload_id, part_number,
                                        SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
    ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
    ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
    self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
    self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
    self.assertEqual(kms_key_id, ret_kms_key_id)

    # Completing the multipart upload with encryption headers should fail.
    self.assertRaises(oss2.exceptions.InvalidArgument, bucket.complete_multipart_upload,
                      key, upload_id, parts, headers=headers)

    # Complete the multipart upload without encryption headers.
    result = bucket.complete_multipart_upload(key, upload_id, parts)
    self.assertEqual(result.status, 200)
    ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
    ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
    ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
    self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
    self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
    self.assertEqual(kms_key_id, ret_kms_key_id)

    bucket.delete_object(key)

# Start the multipart upload.
total_size = os.path.getsize(filename)
part_size = determine_part_size(total_size, preferred_size=100 * 1024)

# Initialize the multipart upload.
upload_id = bucket.init_multipart_upload(remote_filename).upload_id
parts = []

# Upload the parts one by one.
with open(filename, 'rb') as fileobj:
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        result = bucket.upload_part(remote_filename, upload_id, part_number,
                                    SizedFileAdapter(fileobj, num_to_upload))
        parts.append(PartInfo(part_number, result.etag))
        offset += num_to_upload
        part_number += 1

# Complete the multipart upload.
result = bucket.complete_multipart_upload(remote_filename, upload_id, parts)

# CRC64 verification. The original function body was truncated; the completion below
# follows the SDK's Crc64 helper (oss2.utils.Crc64).
def calculate_file_crc64(file_name, block_size=1024 * 1024, init_crc=0):
    """Calculate the CRC64 of a file.

    :param file_name: file name
    :param block_size: block size used when computing the CRC64, 1024KB by default
    :return: the CRC64 value of the file contents
    """
    with open(file_name, 'rb') as f:
        crc64 = oss2.utils.Crc64(init_crc)
        while True:
            data = f.read(block_size)
            if not data:
                break
            crc64.update(data)
    return crc64.crc
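
# A sketch of how the helper above can verify the upload: OSS exposes an object's CRC64
# in the x-oss-hash-crc64ecma response header (read here via head_object; `bucket`,
# `filename`, and `remote_filename` follow the snippet above).
local_crc = calculate_file_crc64(filename)
remote_crc = bucket.head_object(remote_filename).headers.get('x-oss-hash-crc64ecma')
assert str(local_crc) == remote_crc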