    def test_determine_part_size(self):
        self.assertEqual(oss2.determine_part_size(oss2.defaults.part_size + 1),
                         oss2.defaults.part_size)

        self.assertEqual(oss2.determine_part_size(1), 1)
        self.assertEqual(
            oss2.determine_part_size(1, oss2.defaults.part_size + 1), 1)

        n = 10000
        size = (oss2.defaults.part_size + 1) * n
        part_size = oss2.determine_part_size(size)

        self.assertTrue(n * part_size <= size)
        self.assertTrue(oss2.defaults.part_size < part_size)
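All of the examples below follow the same pattern: pick a part size with oss2.determine_part_size, then walk the file in part_size steps. A minimal sketch of how the part count follows from that choice (ceiling division); total_size and the 1 MB preference here are placeholders, not taken from any particular example:

import oss2

total_size = 50 * 1024 * 1024  # e.g. a 50 MB object
part_size = oss2.determine_part_size(total_size, preferred_size=1024 * 1024)

# Number of parts the upload loops below produce: ceil(total_size / part_size).
part_count = (total_size + part_size - 1) // part_size
assert (part_count - 1) * part_size < total_size <= part_count * part_size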
Example #3
 def upload_chunk_file(self, cloud_file, file_to_upload):
     bucket = self.__connect_oss()
     total_size = os.path.getsize(file_to_upload)
     part_size = oss2.determine_part_size(total_size, preferred_size=self.__file_chunk_size)
     file_part_count = (total_size + part_size - 1) // part_size  # number of parts (ceiling division)
     upload_id = bucket.init_multipart_upload(cloud_file).upload_id
     chunk_information = []
     logger.info("开始分片存储%s " % file_to_upload )  # TODO 并发上传
     startTime = time.time()
     for chunkIdx in xrange(file_part_count):
         offset = part_size * chunkIdx
         bytes_to_read = min(part_size, total_size - offset)
         fp = FileChunkIO(file_to_upload, 'r', offset=offset, bytes=bytes_to_read)
         chunk_information.append(dict(
             bucket=bucket,
             cloud_file=cloud_file,
             upload_id=upload_id,
             part_number=chunkIdx+1,
             file_size_adapter=fp,
         ))
     muti_upload = []
     for each in range(self.muti_upload_chunk_num):
         muti_upload.append(gevent.spawn(self.complicate_upload, chunk_information))
     gevent.joinall(muti_upload)
     bucket.complete_multipart_upload(cloud_file, upload_id, self.__parts)
     endTime = time.time()
     spendTime = endTime - startTime
     logger.info("Upload file %s spent %f second." % (file_to_upload, spendTime))
Example #4
    def uploadFile(self):
        filelist = self.file_name()
        if filelist:
            try:
                for file in filelist:
                    fileDir = file.split(self.dirname)[1]
                    key = (self.dirname + fileDir).replace('\\', '/')
                    total_size = os.path.getsize(file)
                    # determine_part_size is used to decide the part size.
                    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

                    # Initialize the multipart upload.
                    upload_id = self.bucket.init_multipart_upload(key).upload_id
                    parts = []

                    # Upload parts one by one.
                    with open(file, 'rb') as fileobj:
                        part_number = 1
                        offset = 0
                        while offset < total_size:
                            num_to_upload = min(part_size, total_size - offset)
                            # SizedFileAdapter(fileobj, size) wraps fileobj in a new file object whose readable length equals size.
                            result = self.bucket.upload_part(key, upload_id, part_number,
                                                             SizedFileAdapter(fileobj, num_to_upload))
                            parts.append(PartInfo(part_number, result.etag))
                            offset += num_to_upload
                            part_number += 1

                    # Complete the multipart upload.
                    self.bucket.complete_multipart_upload(key, upload_id, parts)
                logging.info('upload file to yun ok')
                self.sendMail()
            except Exception as e:
                logging.info('upload file to yun error')
                logging.error(e)
Example #5
    def upload_chunk_file(self, cloud_file, file_to_upload):
        bucket = self.__connect_oss()
        total_size = os.path.getsize(file_to_upload)
        part_size = oss2.determine_part_size(total_size, preferred_size=self.__file_chunk_size)
        file_part_count = (total_size + part_size - 1) // part_size  # number of parts (ceiling division)
        upload_id = bucket.init_multipart_upload(cloud_file).upload_id
        parts = []
        print "开始分片存储%s。。。。。" % file_to_upload
        startTime = time.time()
        with open(file_to_upload, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                result = bucket.upload_part(cloud_file, upload_id, part_number,
                                            oss2.SizedFileAdapter(fileobj, num_to_upload))
                parts.append(oss2.models.PartInfo(part_number, result.etag))

                offset += num_to_upload
                part_number += 1
                print "upload chunk %d" % (part_number - 1)

        bucket.complete_multipart_upload(cloud_file, upload_id, parts)
        endTime = time.time()
        spendTime = endTime - startTime
        print "Upload file spend %f second." % (spendTime)
    def upload(self):
        psize = oss2.determine_part_size(self.__totalSize, preferred_size=self.__partSize)
        
        # Initialize the multipart upload
        self.__uploadId = self.__bucket.init_multipart_upload(self.__key).upload_id

        startTime = time.time()
        expireSeconds = 2500    # The upload credential is valid for 3000 seconds; refresh it ahead of time
        # Upload parts one by one
        with open(AliyunVodUtils.toUnicode(self.__fileName), 'rb') as fileObj:
            partNumber = 1
            offset = 0
            while offset < self.__totalSize:
                uploadSize = min(psize, self.__totalSize - offset)
                #logger.info("UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s" % (self.__fileName, self.__videoId, self.__uploadId, partNumber, uploadSize))
                result = self.__bucket.upload_part(self.__key, self.__uploadId, partNumber, SizedFileAdapter(fileObj,uploadSize))
                #print(result.request_id)
                self.__finishedParts.append(PartInfo(partNumber, result.etag))
                offset += uploadSize
                partNumber += 1

                # Progress callback
                self.__progressCallback(offset, self.__totalSize)

                # Check whether the upload credential is about to expire
                nowTime = time.time()
                if nowTime - startTime >= expireSeconds:
                    self.__bucket = self.__refreshAuthCallback(self.__videoId)
                    startTime = nowTime

        # Complete the multipart upload
        self.__bucket.complete_multipart_upload(self.__key, self.__uploadId, self.__finishedParts, headers=self.__headers)
        
        return result
    def upload_multiple_object(self, bucket_name, obj_name, obj_file):
        try:
            bucket = oss2.Bucket(auth, url, bucket_name)
            total_size = os.path.getsize(obj_file)
            # Use determine_part_size to determine the part size.
            part_size = determine_part_size(total_size,
                                            preferred_size=100 * 1024)
            # Initialize a multipart upload event.
            upload_id = bucket.init_multipart_upload(obj_name).upload_id
            parts = []
            # Upload parts one by one.
            with open(obj_file, 'rb') as fileobj:
                part_number = 1
                offset = 0
                while offset < total_size:
                    num_to_upload = min(part_size, total_size - offset)
                    # The SizedFileAdapter(fileobj, size) method generates a new object, and re-calculates the initial append location.
                    result = bucket.upload_part(
                        obj_name, upload_id, part_number,
                        SizedFileAdapter(fileobj, num_to_upload))
                    parts.append(PartInfo(part_number, result.etag))
                    offset += num_to_upload
                    part_number += 1

            # Complete multipart upload.
            bucket.complete_multipart_upload(obj_name, upload_id, parts)
            # Verify the multipart upload.
            with open(obj_file, 'rb') as fileobj:
                assert bucket.get_object(obj_name).read() == fileobj.read()
            status = "success"
        except Exception as e:
            status = str(e)
        return status
Example #8
    def upload(self, upload_path, filepath):
        """
        upload_path 文件上传后的完整路径包括本身
        filepath 本地文件路径
        """
        key = upload_path
        filename = filepath

        total_size = os.path.getsize(filename)
        # determine_part_size is used to decide the part size.
        part_size = determine_part_size(total_size, preferred_size=100 * 1024)

        # Initialize the multipart upload.
        upload_id = self.bucket.init_multipart_upload(key).upload_id
        parts = []

        # Upload parts one by one.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                # SizedFileAdapter(fileobj, size) wraps fileobj in a new file object whose readable length equals size.
                result = self.bucket.upload_part(
                    key, upload_id, part_number,
                    SizedFileAdapter(fileobj, num_to_upload))
                parts.append(PartInfo(part_number, result.etag))

                offset += num_to_upload
                part_number += 1

        # Complete the multipart upload.
        self.bucket.complete_multipart_upload(key, upload_id, parts)
Example #9
def main(key, filename):
    auth = oss2.Auth('accessId', 'accessSecret')
    bucket = oss2.Bucket(auth, 'endpoint.aliyuncs.com', 'bucket')

    total_size = os.path.getsize(filename)
    part_size = oss2.determine_part_size(total_size,
                                         preferred_size=10 * 1024 * 1024)

    # init
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []

    print 'start to upload {} with id {}'.format(filename, upload_id)

    # upload
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                oss2.SizedFileAdapter(fileobj, num_to_upload))
            parts.append(oss2.models.PartInfo(part_number, result.etag))

            offset += num_to_upload
            part_number += 1

    # complete
    bucket.complete_multipart_upload(key, upload_id, parts)
    print 'done!'
Example #10
def large_file_to_oss(id, file, type_name='courses'):
    try:
        file_name = ''
        if file:
            _, file_ext = os.path.splitext(file.name)
            md = hashlib.md5()
            md.update((str(time.time()) + file.name).encode('utf8'))
            file_name = type_name + '/' + str(id) + '/' + md.hexdigest() + file_ext
            oss_file_name = os.path.join(settings.ALIYUN_OSS_DIRECTORY_PREFIX, file_name)
            bucket = aliyun_oss()
            key = oss_file_name
            filename = file
            total_size = filename.size
            part_size = determine_part_size(total_size, preferred_size=100 * 1024)
            # Initialize the multipart upload
            upload_id = bucket.init_multipart_upload(key).upload_id
            parts = []
            # Upload parts one by one
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                result = bucket.upload_part(key, upload_id, part_number,
                                            SizedFileAdapter(file, num_to_upload))
                parts.append(PartInfo(part_number, result.etag))
                offset += num_to_upload
                part_number += 1
            # Complete the multipart upload
            bucket.complete_multipart_upload(key, upload_id, parts)
            # # Verify the upload
            #
            # assert bucket.get_object(key).read() == file.read()
        return file_name
    except Exception:
        logger.error("Exceptions: {}".format(sys.exc_info()[0]))
Example #12
    def persist_file(self, path, buf, info, meta=None, headers=None):
        # First use the helper to choose a part size; here we prefer 128 KB
        total_size = len(buf)
        part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

        # Initialize the multipart upload and get an Upload ID, which all subsequent calls need.
        key = os.path.join(self.objectPath, info)
        upload_id = self.bucket.init_multipart_upload(key).upload_id

        # Upload parts one by one
        # oss2.SizedFileAdapter() wraps the file object so that its readable length equals size_to_upload
        parts = []
        part_number = 1
        offset = 0
        while offset < total_size:
            size_to_upload = min(part_size, total_size - offset)
            result = self.bucket.upload_part(key, upload_id, part_number,
                                             oss2.SizedFileAdapter(buf, size_to_upload))
            parts.append(oss2.models.PartInfo(part_number,
                                              result.etag,
                                              size=size_to_upload,
                                              part_crc=result.crc))

            offset += size_to_upload
            part_number += 1

        # Complete the multipart upload (after the loop, once all parts are uploaded)
        self.bucket.complete_multipart_upload(key, upload_id, parts)
Example #13
def OssSliceUpload():
    accessKeyId = sys.argv[1]
    accessSecret = sys.argv[2]
    EndPoint = sys.argv[3]
    Bucket = sys.argv[4]
    RemoteFile = sys.argv[5]
    LocalFile = sys.argv[6]

    auth = oss2.Auth(accessKeyId, accessSecret)
    bucket = oss2.Bucket(auth, EndPoint, Bucket)

    key = RemoteFile
    filename = LocalFile
    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)
    # Initialize the multipart upload
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []
    # Upload parts one by one
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1
    # Complete the multipart upload
    bucket.complete_multipart_upload(key, upload_id, parts)
    # Verify the upload
    with open(filename, 'rb') as fileobj:
        assert bucket.get_object(key).read() == fileobj.read()
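Example #13 reads its six parameters positionally from sys.argv, so a run would look roughly like the following, assuming the script is saved as OssSliceUpload.py and the function is actually invoked at the bottom of the file:

python OssSliceUpload.py <AccessKeyId> <AccessKeySecret> <Endpoint> <Bucket> <RemoteFile> <LocalFile>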
    def upload(self):
        psize = oss2.determine_part_size(self.__totalSize,
                                         preferred_size=self.__partSize)

        # Initialize the multipart upload
        self.__uploadId = self.__bucket.init_multipart_upload(
            self.__key).upload_id

        # Upload parts one by one
        with open(AliyunVodUtils.toUnicode(self.__fileName), 'rb') as fileObj:
            partNumber = 1
            offset = 0
            while offset < self.__totalSize:
                uploadSize = min(psize, self.__totalSize - offset)
                logger.info(
                    "UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s"
                    % (self.__fileName, self.__videoId, self.__uploadId,
                       partNumber, uploadSize))
                result = self.__bucket.upload_part(
                    self.__key, self.__uploadId, partNumber,
                    SizedFileAdapter(fileObj, uploadSize))
                self.__finishedParts.append(PartInfo(partNumber, result.etag))
                offset += uploadSize
                partNumber += 1

        # Complete the multipart upload
        self.__bucket.complete_multipart_upload(self.__key,
                                                self.__uploadId,
                                                self.__finishedParts,
                                                headers=self.__headers)

        return result
    def test_upload_part(self):
        key = 'requestpayment-test-upload-part-object'
        filename = key + '.txt'

        headers = dict()
        headers[OSS_REQUEST_PAYER] = "requester"

        content = random_bytes(1024 * 1024)

        with open(filename, 'wb') as f:
            f.write(content)

        total_size = os.path.getsize(filename)
        # Set part size
        part_size = determine_part_size(total_size, preferred_size=(100*1024))

        # Init multipart without the payer setting; this should fail.
        self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.init_multipart_upload, key)

        # Init multipart with the payer setting; this should succeed.
        upload_id = self.payer_bucket.init_multipart_upload(key, headers=headers).upload_id
        parts = []

        # Upload a part without the payer setting; this should fail.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            num_to_upload = part_size
            self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.upload_part, key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload))

        # Upload parts with the payer setting; this should succeed.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                result = self.payer_bucket.upload_part(key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload), headers=headers)
                parts.append(PartInfo(part_number, result.etag))

                offset += num_to_upload
                part_number += 1

        # Complete the multipart upload without the payer setting; this should fail.
        self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.complete_multipart_upload, key, upload_id, parts)

        # Complete the multipart upload with the payer setting; this should succeed.
        result = self.payer_bucket.complete_multipart_upload(key, upload_id, parts, headers=headers)
        self.assertEqual(result.status, 200)

        self.bucket.delete_object(key)
        os.remove(filename)
Example #16
    def oss_upload(osskey, filename, uploadpath, de_suffix=True):
        try:
            # When de_suffix is True, use a regex to strip the file extension
            if de_suffix is True:
                osskey = re.match(r'\w+', filename).group()

            # A root-account AccessKey has full API permissions and is very risky to use. It is strongly recommended to create and use a RAM user for API access and day-to-day operations; see https://ram.console.aliyun.com to create one.
            auth = oss2.Auth(ACCESSKEYID, ACCESSKEYSECRET)
            # The endpoint uses Hangzhou as an example; fill in your actual region.
            bucket = oss2.Bucket(auth, ENDPOINT, BUCKETNAME)

            key = uploadpath + osskey

            total_size = os.path.getsize(filename)

            # determine_part_size is used to decide the part size.
            part_size = determine_part_size(total_size,
                                            preferred_size=10240 * 1000)

            # Initialize the multipart upload.
            upload_id = bucket.init_multipart_upload(key).upload_id
            parts = []
            # Upload parts one by one.
            with open(filename, 'rb') as fileobj:
                part_number = 1
                offset = 0

                while offset < total_size:
                    num_to_upload = min(part_size, total_size - offset)
                    # SizedFileAdapter(fileobj, size) wraps fileobj in a new file object whose readable length equals size.
                    result = bucket.upload_part(
                        key,
                        upload_id,
                        part_number,
                        SizedFileAdapter(
                            fileobj,
                            num_to_upload,
                        ),
                    )
                    parts.append(PartInfo(part_number, result.etag))

                    offset += num_to_upload
                    part_number += 1

                # Complete the multipart upload
                bucket.complete_multipart_upload(key, upload_id, parts)

        except Exception as f:
            Print.error(f)
    def test_upload_part(self):
        key = 'traffic-limit-test-resumble-upload-object'
        # Create tmp file 2MB
        file_name = self._prepare_temp_file_with_size(OBJECT_SIZE_1MB * 2)

        total_size = os.path.getsize(file_name)
        # Determine part size is 1MB
        part_size = determine_part_size(total_size,
                                        preferred_size=(1024 * 1024))

        # Init part
        upload_id = self.bucket.init_multipart_upload(key).upload_id
        parts = []

        headers = dict()
        headers[OSS_TRAFFIC_LIMIT] = str(LIMIT_100KB)

        #  Upload part
        with open(file_name, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)

                # Upload part with traffic limit setting
                start_time_sec = int(time.time())
                result = self.bucket.upload_part(key,
                                                 upload_id,
                                                 part_number,
                                                 SizedFileAdapter(
                                                     fileobj, num_to_upload),
                                                 headers=headers)
                end_time_sec = int(time.time())

                # Calculate elapsed time
                expense_time_sec = end_time_sec - start_time_sec
                # Theoretical time is 1MB/100KB = 10s; set the minimum theoretical time to 10*0.7s
                theoretical_expense_min = 10 * 0.7
                # Compare to minimum theoretical time
                self.assertEqual((expense_time_sec > theoretical_expense_min),
                                 True)

                parts.append(PartInfo(part_number, result.etag))
                offset += num_to_upload
                part_number += 1

        result = self.bucket.complete_multipart_upload(key, upload_id, parts)
        self.assertEqual(result.status, 200)
    def test_upload_part_copy(self):
        src_object_name = 'requestpayment-test-upload-part-copy-src'
        dest_object_name = 'requestpayment-test-upload-part-copy-dest'
        content = b'a' * 1024 * 1024

        headers = dict()
        headers[OSS_REQUEST_PAYER] = "requester"

        self.bucket.put_object(src_object_name, content)        

        # Get src object size
        head_info = self.bucket.head_object(src_object_name)
        total_size = head_info.content_length
        self.assertEqual(total_size, 1024 * 1024)

        # Set part size
        part_size = determine_part_size(total_size, preferred_size=(100*1024))

        upload_id = self.payer_bucket.init_multipart_upload(dest_object_name, headers=headers).upload_id
        parts = []

        # Upload part copy without the payer setting; this should fail.
        part_number = 1
        offset = 0
        num_to_upload = min(part_size, total_size - offset)
        end = offset + num_to_upload - 1
        self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.upload_part_copy, self.payer_bucket.bucket_name, 
                            src_object_name, (offset, end), dest_object_name, upload_id, part_number)

        # Upload part copy with the payer setting; this should succeed.
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            end = offset + num_to_upload - 1
            result = self.payer_bucket.upload_part_copy(self.payer_bucket.bucket_name, src_object_name, (offset, end), 
                                dest_object_name, upload_id, part_number, headers=headers)

            parts.append(PartInfo(part_number, result.etag))

            offset += num_to_upload
            part_number += 1

        # Complete the multipart upload with the payer setting; this should succeed.
        result = self.payer_bucket.complete_multipart_upload(dest_object_name, upload_id, parts, headers=headers)

        self.bucket.delete_object(src_object_name)
        self.bucket.delete_object(dest_object_name)
Example #19
def test_upload_success(test_bucket_access, test_create_file):
    bucket = test_bucket_access
    bucket_stat = bucket.get_bucket_stat()
    count_before_upload = bucket_stat.object_count
    current_app.logger.info('storage: ' +
                            str(bucket_stat.storage_size_in_bytes))
    current_app.logger.info('object count: ' + str(bucket_stat.object_count))
    current_app.logger.info('multi part upload count: ' +
                            str(bucket_stat.multi_part_upload_count))
    filename = test_create_file
    # You can also call the multipart upload API directly.
    # First use the helper to choose a part size; here we prefer 128 KB
    import os
    import oss2
    total_size = os.path.getsize(filename)
    current_app.logger.info('filesize: ' + str(total_size))
    part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)
    # Initialize the multipart upload and get an Upload ID, which all subsequent calls need.
    key = '{0}.txt'.format(random_string(10))
    upload_id = bucket.init_multipart_upload(key).upload_id

    # Upload parts one by one
    # oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable length equals size_to_upload
    with open(filename, 'rb') as fileobj:
        parts = []
        part_number = 1
        offset = 0
        while offset < total_size:
            size_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                oss2.SizedFileAdapter(fileobj, size_to_upload))
            parts.append(
                oss2.models.PartInfo(part_number,
                                     result.etag,
                                     size=size_to_upload,
                                     part_crc=result.crc))

            offset += size_to_upload
            part_number += 1

        # Complete the multipart upload
        bucket.complete_multipart_upload(key, upload_id, parts)
    count_after_upload = bucket.get_bucket_stat().object_count
    assert count_before_upload < count_after_upload
Example #20
def upload(oss_task_zip, zip_task_file, endpoint, bucket_name):
    dlog.debug(f"debug: upload: oss_task_zip:{oss_task_zip}; zip_task_file:{zip_task_file}")
    bucket = _get_oss_bucket(endpoint, bucket_name)
    total_size = os.path.getsize(zip_task_file)
    part_size = determine_part_size(total_size, preferred_size=1000 * 1024)
    upload_id = bucket.init_multipart_upload(oss_task_zip).upload_id
    parts = []
    with open(zip_task_file, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(oss_task_zip, upload_id, part_number, SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1
    # result = bucket.complete_multipart_upload(oss_task_zip, upload_id, parts)
    result = bucket.complete_multipart_upload(oss_task_zip, upload_id, parts)
    # print('debug:upload_result:', result, dir())
    return result
Example #21
 def push_by_piece(self, path, name):
     path = Path(path)
     total_size = path.stat().st_size
     part_size = determine_part_size(total_size, preferred_size=1024 * 1024)
     upload_id = self.bucket.init_multipart_upload(name).upload_id
     parts = []
     with open(path, 'rb') as fileobj:
         part_number = 1
         offset = 0
         while offset < total_size:
             num_to_upload = min(part_size, total_size - offset)
             # SizedFileAdapter(fileobj, size) wraps fileobj in a new file object whose readable length equals size.
             result = self.bucket.upload_part(name, upload_id, part_number,
                                              SizedFileAdapter(fileobj,
                                                               num_to_upload))
             parts.append(PartInfo(part_number, result.etag))
             offset += num_to_upload
             part_number += 1
     headers = {'Content-MD5': self.get_md5(path)}
     self.bucket.complete_multipart_upload(name, upload_id, parts, headers=headers)
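Example #21 passes a Content-MD5 header computed by a get_md5 helper that is not shown. A plausible sketch returning the base64-encoded MD5 digest of the file, which is what the call site suggests (the helper name comes from the example; the implementation is an assumption):

 def get_md5(self, path):
     # Assumed helper: base64-encoded MD5 digest of the file at path.
     import base64
     import hashlib
     md5 = hashlib.md5()
     with open(path, 'rb') as fileobj:
         for block in iter(lambda: fileobj.read(1024 * 1024), b''):
             md5.update(block)
     return base64.b64encode(md5.digest()).decode('utf-8')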
Example #22
    def uploadFile2OssByPart(self, file, objPath):  # 分片上传方法
        '''
        Upload a file object to OSS in parts
        :param file: file object
        :param objPath: destination object name (path) on OSS
        :return:
        '''
        totalSize = file.size
        # determine_part_size is used to decide the part size.
        partSize = oss2.determine_part_size(totalSize,
                                            preferred_size=PREFERRED_SIZE)
        uploadId = self.bucket.init_multipart_upload(objPath).upload_id
        parts = []
        # Multipart upload
        executor = ThreadPoolExecutor(max_workers=1)
        allTask = []
        partNumber = 1
        offset = 0
        while offset < totalSize:
            numToUpload = min(partSize, totalSize - offset)
            print(partNumber, numToUpload)
            ## Plain sequential upload
            # SizedFileAdapter(file, size) wraps the file object so that its readable length equals size.
            result = self.bucket.upload_part(
                objPath, uploadId, partNumber,
                oss2.SizedFileAdapter(file, numToUpload))
            parts.append(oss2.models.PartInfo(partNumber, result.etag))
            ## Threaded upload (alternative, not used here)
            # allTask.append(executor.submit(_uploadPart, partNumber, numToUpload))
            offset += numToUpload
            partNumber += 1
        # Complete the multipart upload.
        wait(allTask, return_when=ALL_COMPLETED)
        resultList = [future.result() for future in as_completed(allTask)]
        for data in sorted(resultList, key=lambda x: x[0]):  # re-sort threaded results before adding them to parts
            partNumber, result = data[0], data[1]
            print(partNumber)
            parts.append(oss2.models.PartInfo(partNumber, result.etag))

        self.bucket.complete_multipart_upload(objPath, uploadId, parts)
        return getOssSavePath(objPath)
Example #23
def main(args):

    filename = args.upload_file
    key = os.path.split(filename)[1]
    for param in (access_key_id, access_key_secret, bucket_name, endpoint):
        assert '<' not in param, 'Please set the parameter: ' + param

    # Create a Bucket object; all object-related operations go through it
    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100000 * 1024)

    # Initialize the multipart upload
    print('OSS initializing:')
    encode_md5 = calculate_file_md5(filename)
    print("md5 of %s is %s" % (filename, encode_md5))
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []

    # Upload parts one by one
    print('Separate the file into parts and upload:')
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(key, upload_id, part_number,
                                        SizedFileAdapter(fileobj, num_to_upload), progress_callback=percentage)
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    # Complete the multipart upload
    print('Complete the uploading task:')
    result = bucket.complete_multipart_upload(key, upload_id, parts)

    print('upload result status: %i' % result.status)
    print('upload complete with the file %s' % key)
Example #24
def upload(filename, key):
    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

    END_POINT = 'oss-cn-shanghai.aliyuncs.com'
    AUTH = Auth('LTAIStvC4wpBWRVG', 'BNXtvOz82JjzlSLjPBdQJyEUpXi4PD')
    BUCKET_NAME = "yunbeifeng"
    bucket = Bucket(AUTH, END_POINT, bucket_name=BUCKET_NAME)

    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    bucket.complete_multipart_upload(key, upload_id, parts)
Example #25
    def copy_data(self, from_key, to_key):
        """复制文件"""
        total_size = self.bucket.head_object(from_key).content_length
        part_size = determine_part_size(total_size, preferred_size=100 * 1024)

        # Initialize the multipart upload
        upload_id = self.bucket.init_multipart_upload(to_key).upload_id
        parts = []

        # Copy parts one by one
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            byte_range = (offset, offset + num_to_upload - 1)

            result = self.bucket.upload_part_copy(self.bucket.bucket_name, from_key
                                                  , byte_range, to_key, upload_id, part_number)
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

        # Complete the multipart upload
        self.bucket.complete_multipart_upload(to_key, upload_id, parts)
Example #26
    def _store_in_thread(self, file):
        # First use the helper to choose a part size; here we prefer 128 KB
        total_size = os.path.getsize(file)
        part_size = oss2.determine_part_size(total_size,
                                             preferred_size=128 * 1024)

        # Initialize the multipart upload and get an Upload ID, which all subsequent calls need.
        key = file.replace('../', '')
        upload_id = self.bucket.init_multipart_upload(key).upload_id

        # Upload parts one by one
        # oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable length equals size_to_upload
        with open(file, 'rb') as fileobj:
            parts = []
            part_number = 1
            offset = 0
            while offset < total_size:
                size_to_upload = min(part_size, total_size - offset)
                result = self.bucket.upload_part(
                    key, upload_id, part_number,
                    oss2.SizedFileAdapter(fileobj, size_to_upload))
                parts.append(
                    oss2.models.PartInfo(part_number,
                                         result.etag,
                                         size=size_to_upload,
                                         part_crc=result.crc))

                offset += size_to_upload
                part_number += 1

            # Complete the multipart upload
            self.bucket.complete_multipart_upload(key, upload_id, parts)

        # Verify the upload
        with open(file, 'rb') as fileobj:
            assert self.bucket.get_object(key).read() == fileobj.read()
    def test_multipart_upload(self):
        auth = oss2.Auth(OSS_ID, OSS_SECRET)
        bucket_name = OSS_BUCKET + "-test-multipart-upload-data-encryption"
        bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
        bucket.create_bucket()

        key = 'data-encryption-test-upload-part-object'
        filename = self._prepare_temp_file_with_size(1024 * 1024)

        headers = dict()
        headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
        headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4

        total_size = os.path.getsize(filename)
        # Set part size
        part_size = determine_part_size(total_size, preferred_size=(100*1024))

        # Init multipart with encryption headers.
        result = bucket.init_multipart_upload(key, headers=headers)
        ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
        ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
        ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
        self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
        self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
        self.assertIsNotNone(ret_kms_key_id)

        kms_key_id = ret_kms_key_id
        upload_id = result.upload_id
        parts = []

        # Uploading a part with the encryption headers should fail.
        headers = dict()
        headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
        headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
        with open(filename, 'rb') as fileobj:
            part_number = 1
            num_to_upload = part_size
            self.assertRaises(oss2.exceptions.InvalidArgument, bucket.upload_part, key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload), headers=headers)

        # Upload parts without encryption headers.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)

                result = bucket.upload_part(key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload))

                parts.append(PartInfo(part_number, result.etag))

                offset += num_to_upload
                part_number += 1

                ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
                ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
                ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
                self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
                self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
                self.assertEqual(kms_key_id, ret_kms_key_id)

        # Completing the multipart upload with encryption headers should fail.
        self.assertRaises(oss2.exceptions.InvalidArgument, bucket.complete_multipart_upload, key, upload_id, parts, headers=headers)

        # Complete the multipart upload without encryption headers.
        result = bucket.complete_multipart_upload(key, upload_id, parts)
        self.assertEqual(result.status, 200)
        ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
        ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
        ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
        self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
        self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
        self.assertEqual(kms_key_id, ret_kms_key_id)

        bucket.delete_object(key)
Example #28
    def copy(self, src, dst):
        raw_src, raw_dst = str(src), str(dst)
        try:
            cloud_src = src.startswith('oss://')
            cloud_dst = dst.startswith('oss://')
            if not cloud_src and not cloud_dst:
                return super().copy(src, dst)

            if src == dst:
                return
            # download
            if cloud_src and not cloud_dst:
                target_dir, _ = os.path.split(dst)
                if not os.path.exists(target_dir):
                    os.makedirs(target_dir)
                bucket, src = self._split(src)
                obj = bucket.get_object(src)
                if obj.content_length > 100 * 1024 ** 2:  # 100M
                    with oss_progress('downloading') as callback:
                        bucket.get_object_to_file(src, dst, progress_callback=callback)
                else:
                    bucket.get_object_to_file(src, dst)
                return
            bucket, dst = self._split(dst)
            # upload
            if cloud_dst and not cloud_src:
                src_size = os.stat(src).st_size
                if src_size > 5 * 1024 ** 3:  # 5G
                    raise RuntimeError(f'A file > 5G cannot be uploaded to OSS. Please split your file first.\n{src}')
                if src_size > 100 * 1024 ** 2:  # 100M
                    with oss_progress('uploading') as callback:
                        bucket.put_object_from_file(dst, src, progress_callback=callback)
                else:
                    bucket.put_object_from_file(dst, src)
                return
            # copy between oss paths
            src_bucket, src = self._split(src)
            total_size = src_bucket.head_object(src).content_length
            if src_bucket.get_bucket_location().location != bucket.get_bucket_location().location:
                import tempfile
                local_tmp = os.path.join(tempfile.gettempdir(), src)
                self.copy(f'oss://{src_bucket.bucket_name}/{src}', local_tmp)
                self.copy(local_tmp, f'oss://{bucket.bucket_name}/{dst}')
                self.remove(local_tmp)
                return

            if total_size < 1024 ** 3 or src_bucket != bucket:  # 1GB
                bucket.copy_object(src_bucket.bucket_name, src, dst)
            else:
                # multipart copy
                from oss2.models import PartInfo
                from oss2 import determine_part_size
                part_size = determine_part_size(total_size, preferred_size=100 * 1024)
                upload_id = bucket.init_multipart_upload(dst).upload_id
                parts = []

                part_number = 1
                offset = 0
                while offset < total_size:
                    num_to_upload = min(part_size, total_size - offset)
                    byte_range = (offset, offset + num_to_upload - 1)

                    result = bucket.upload_part_copy(bucket.bucket_name, src, byte_range, dst, upload_id,
                                                     part_number)
                    parts.append(PartInfo(part_number, result.etag))

                    offset += num_to_upload
                    part_number += 1

                bucket.complete_multipart_upload(dst, upload_id, parts)
        except Exception as e:
            print("haha")
            print("{}".format(e))
            print("Copy failed because oss auth not fully opened. Using first download then upload...")
            try:
                self.download(raw_src, ".easy_distill_tmp_file")
                self.upload(".easy_distill_tmp_file", raw_dst)
                print("Copying done")
            except Exception as e:
                print("{}".format(e))
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
print('sse_algorithm:', sse_algo)
print('data_algorithm:', data_algo)
print('kms_key_id:', kms_key_id)

# Read back the uploaded content
object_stream = bucket.get_object(key)
print(object_stream.read())

# ########## The following shows how to specify server-side encryption for an object when uploading it with the multipart upload API ##########
key = 'test-upload_file'
filename = '<yourLocalFile>'

total_size = os.path.getsize(filename)
# determine_part_size is used to decide the part size.
part_size = determine_part_size(total_size, preferred_size=100 * 1024)

# Specify the encryption method in the headers
headers = dict()
# Use the OSS server-side SM4 encryption algorithm
headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_SM4

# Specify the server-side encryption type for the object when initializing the multipart upload
result = bucket.init_multipart_upload(key, headers=headers)
sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
print('sse_algorithm:', sse_algo)
print('data_algorithm:', data_algo)
print('kms_key_id:', kms_key_id)
with open(filename, 'wb') as fileobj:
    fileobj.write(content)

# Resumable upload, case 1: the file is small (below oss2.defaults.multipart_threshold),
# so oss2.Bucket.put_object is actually used under the hood
oss2.resumable_upload(bucket, 'remote-normal.txt', filename)

# Resumable upload, case 2: for demonstration we pass the optional multipart_threshold to force a multipart upload
oss2.resumable_upload(bucket, 'remote-multipart.txt', filename, multipart_threshold=100 * 1024)


# You can also call the multipart upload API directly.
# First use the helper to choose a part size; here we prefer 128 KB
total_size = os.path.getsize(filename)
part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

# Initialize the multipart upload and get an Upload ID, which all subsequent calls need.
key = 'remote-multipart2.txt'
upload_id = bucket.init_multipart_upload(key).upload_id

# Upload parts one by one
# oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable length equals num_to_upload
with open(filename, 'rb') as fileobj:
    parts = []
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        result = bucket.upload_part(key, upload_id, part_number,
                                    oss2.SizedFileAdapter(fileobj, num_to_upload))
Example #31
    fileobj.write(content)

# Resumable upload, case 1: the file is small (below oss2.defaults.multipart_threshold),
# so oss2.Bucket.put_object is actually used under the hood
oss2.resumable_upload(bucket, 'remote-normal.txt', filename)

# Resumable upload, case 2: for demonstration we pass the optional multipart_threshold to force a multipart upload
oss2.resumable_upload(bucket,
                      'remote-multipart.txt',
                      filename,
                      multipart_threshold=100 * 1024)

# You can also call the multipart upload API directly.
# First use the helper to choose a part size; here we prefer 128 KB
total_size = os.path.getsize(filename)
part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

# Initialize the multipart upload and get an Upload ID, which all subsequent calls need.
key = 'remote-multipart2.txt'
upload_id = bucket.init_multipart_upload(key).upload_id

# Upload parts one by one
# oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable length equals num_to_upload
with open(filename, 'rb') as fileobj:
    parts = []
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        result = bucket.upload_part(
            key, upload_id, part_number,
    def uploadFile(self):
        """
        Function check if cache file with uploading information is exist and try to continue uplod otherwise start new 
        file uploading to OSS
        :return: 
        """
        total_size = os.stat(self.queue_object.path).st_size
        part_size = oss2.determine_part_size(total_size, preferred_size=2 * 1024 * 1024)

        try:
            with open(self.pkl_path, 'rb') as input_file:  # We try to open file with title of uploading key
                storied_object = pickle.load(input_file)  # if file is open means upload was not finished
                parts = storied_object.parts
                upload_id = storied_object.upload_id

                bucket_name = storied_object.bucket_name
                endpoint = storied_object.endpoint
                bucket = oss2.Bucket(self._auth, endpoint, bucket_name)
                # offset = storied_object.offset
        except IOError:
            bucket = oss2.Bucket(self._auth, self.queue_object.endpoint, self.queue_object.bucket_name)
            upload_id = bucket.init_multipart_upload(self.queue_object.key).upload_id
            storied_object = UploadingObject()
            uploade_parts = bucket.list_multipart_uploads()

            for part in uploade_parts.upload_list:
                if part.upload_id == upload_id:
                    with open(self.pkl_path, 'wb') as output_file:
                        storied_object.key = self.queue_object.key
                        storied_object.bucket_name = self.queue_object.bucket_name
                        storied_object.endpoint = self.queue_object.endpoint
                        storied_object.path = self.queue_object.path
                        storied_object.upload_id = upload_id
                        storied_object.initiate_date = part.initiation_date
                        pickle.dump(storied_object, output_file, pickle.HIGHEST_PROTOCOL)

            parts = []

        with open(self.queue_object.path, 'rb') as fileobj:
            while storied_object.offset < total_size and not self.stoprequest.isSet():
                # print storied_object.part_number
                # print storied_object.parts
                num_to_upload = min(part_size, total_size - storied_object.offset)
                upload_content = oss2.SizedFileAdapter(fileobj, num_to_upload)
                try:
                    result = bucket.upload_part(self.queue_object.key,
                                                upload_id,
                                                storied_object.part_number,
                                                upload_content)
                except NoSuchUpload:
                    print "\n ==== Not finished upload not exist on  OSS bucket ===="
                    print " Clean local cache, and update uploading queue"
                    os.remove(self.pkl_path)
                    raise UploadIdNotInBucket("Upload id is not in bucket")

                # Append directly to class is didn't work with "pickle"
                parts.append(oss2.models.PartInfo(storied_object.part_number, result.etag))
                storied_object.parts = parts

                if num_to_upload == part_size:
                    percentage = str(self._percentage(num_to_upload * storied_object.part_number, total_size))
                else:
                    percentage = 'Complete'

                # print percentage

                # logging.debug("File: %s => Bucket: %s - %s", key, bucket_name, percentage)
                # print "File: {0} => Bucket: {1} - {2}".format(self.queue_object.key.encode("utf-8"),
                #                                               self.queue_object.bucket_name,
                #                                               percentage)

                sys.stdout.write("\rFile: {0} => Bucket: {1} - {2}".format(self.queue_object.key.encode("utf-8"),
                                                              self.queue_object.bucket_name,
                                                              percentage)).splitlines()
                sys.stdout.flush()

                storied_object.offset += num_to_upload

                storied_object.part_number += 1
                with open(self.pkl_path, 'wb') as output_file:
                    pickle.dump(storied_object, output_file, pickle.HIGHEST_PROTOCOL)

            if not self.stoprequest.isSet():
                bucket.complete_multipart_upload(self.queue_object.key, upload_id, parts)
                os.remove(self.pkl_path)
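The resumable example above pickles an UploadingObject that is not shown. A minimal sketch of what such a state holder might look like, inferred purely from the attributes the code reads and writes (the class name comes from the example; the defaults are assumptions):

class UploadingObject(object):
    """Assumed pickled upload state; fields inferred from the example above."""

    def __init__(self):
        self.key = None             # object key on OSS
        self.bucket_name = None
        self.endpoint = None
        self.path = None            # local file path
        self.upload_id = None
        self.initiate_date = None
        self.parts = []             # oss2.models.PartInfo objects uploaded so far
        self.offset = 0             # bytes already uploaded
        self.part_number = 1        # next part number to upload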
def handler(environ, start_response):
    context = environ['fc.context']
    request_uri = environ['fc.request_uri']
    for k, v in environ.items():
        if k.startswith("HTTP_"):
            # process custom request headers
            print(k, v)
            pass

    # get request body
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        request_body_size = 0
    request_body = environ['wsgi.input'].read(request_body_size)
    request_body = urllib.unquote(request_body).decode('utf8')

    # get request method
    request_method = environ['REQUEST_METHOD']
    if request_method != 'POST':
        status = '400 Bad Request'
        response_headers = [('Content-type', 'application/json')]
        start_response(status, response_headers)
        data = json.dumps({"error": "invalid request method."})
        return [data]

    # print request body
    print('request_body: {}'.format(request_body))
    request_body_json = json.loads(request_body)

    creds = context.credentials
    auth = oss2.StsAuth(creds.accessKeyId, creds.accessKeySecret,
                        creds.securityToken)

    items = request_body_json.get("items")
    print("[DEBUG] items: {0}".format(items))

    # zip name
    re_code = request_body_json.get("re_code")
    is_full = request_body_json.get("full")
    uuid = request_body_json.get("uuid")
    tmpdir = '/tmp/download/'

    os.system("rm -rf /tmp/*")
    os.mkdir(tmpdir)

    # download
    for item in items:
        print("[DEBUG] item: {}".format(item))

        oss_protocol = item.get("protocol")
        oss_bucket_name = item.get("bucket")
        oss_endpoint = item.get("endpoint")
        file_path = item.get("path")
        file_original_name = item.get("original_name")

        bucket = oss2.Bucket(auth, oss_endpoint, oss_bucket_name)

        bucket.get_object_to_file(file_path, tmpdir + file_original_name)

    #zip file
    zipname = '/tmp/' + re_code + '.zip'
    make_zip(tmpdir, zipname)

    #upload
    total_size = os.path.getsize(zipname)
    part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

    if is_full:
        zip_path = 'full-archive/' + re_code + '/' + uuid + '.zip'
    else:
        zip_path = 'custom-archive/' + re_code + '/' + uuid + '.zip'

    # use the last bucket to upload zip package
    upload_id = bucket.init_multipart_upload(zip_path).upload_id

    with open(zipname, 'rb') as fileobj:
        parts = []
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                zip_path, upload_id, part_number,
                oss2.SizedFileAdapter(fileobj, num_to_upload))
            parts.append(oss2.models.PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

        bucket.complete_multipart_upload(zip_path, upload_id, parts)

    zip_meta = bucket.head_object(zip_path)
    zip_content_type = zip_meta.headers.get('Content-Type')

    status = '200 OK'
    response_headers = [('Content-type', 'application/json')]
    start_response(status, response_headers)
    url = oss_protocol + "://" + oss_bucket_name + "." + oss_endpoint + "/" + zip_path
    data = json.dumps({
        "host": url,
        "protocol": oss_protocol,
        "bucket": oss_bucket_name,
        "endpoint": oss_endpoint,
        "path": zip_path,
        "original_name": re_code + ".zip",
        "type": zip_content_type,
    })
    return [data]