def test_complete(self, do_request):
        from oss2.models import PartInfo

        parts = list()
        parts.append(PartInfo(2, '9433E6178C51CFEC867F592F4B827B50'))
        parts.append(PartInfo(3, '5570B91F31EBB06B6BA93BA6D63BE68A'))

        body = b'''<?xml version="1.0" encoding="UTF-8"?>
        <CompleteMultipartUploadResult>
          <Location>http://ming-oss-share.oss-cn-hangzhou.aliyuncs.com/fake-key</Location>
          <Bucket>ming-oss-share</Bucket>
          <Key>fake-key</Key>
          <ETag>"{0}-2"</ETag>
        </CompleteMultipartUploadResult>
        '''.format(ETAG)

        req_info = RequestInfo()
        do_request.auto_spec = True
        do_request.side_effect = partial(do4body,
                                         req_info=req_info,
                                         data_type=DT_BYTES,
                                         body=body)

        bucket().complete_multipart_upload('fake-key', UPLOAD_ID, parts)

        self.assertEqual(req_info.req.params['uploadId'], UPLOAD_ID)

        expected = b'<CompleteMultipartUpload><Part><PartNumber>2</PartNumber><ETag>"9433E6178C51CFEC867F592F4B827B50"</ETag></Part>' +\
                   b'<Part><PartNumber>3</PartNumber><ETag>"5570B91F31EBB06B6BA93BA6D63BE68A"</ETag></Part></CompleteMultipartUpload>'

        self.assertXmlEqual(expected, req_info.data)
Example #2
    def upload(self, upload_path, filepath):
        """
        upload_path: full destination path of the uploaded object, including the file name
        filepath: path of the local file
        """
        key = upload_path
        filename = filepath

        total_size = os.path.getsize(filename)
        # determine_part_size determines the part size.
        part_size = determine_part_size(total_size, preferred_size=100 * 1024)

        # Initialize the multipart upload.
        upload_id = self.bucket.init_multipart_upload(key).upload_id
        parts = []

        # Upload parts one by one.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                # SizedFileAdapter(fileobj, size) returns a new file-like object that reads at most size bytes from the current position.
                result = self.bucket.upload_part(
                    key, upload_id, part_number,
                    SizedFileAdapter(fileobj, num_to_upload))
                parts.append(PartInfo(part_number, result.etag))

                offset += num_to_upload
                part_number += 1

        # Complete the multipart upload.
        self.bucket.complete_multipart_upload(key, upload_id, parts)
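
This manual loop is the pattern that oss2's resumable_upload helper wraps, adding a local checkpoint file so an interrupted upload can resume. A minimal sketch under that assumption; bucket, key and filepath stand for the same objects used above:

import oss2

# One-call alternative to the manual loop: splits the file into parts,
# records progress in a checkpoint file, and resumes after interruption.
oss2.resumable_upload(bucket, key, filepath,
                      part_size=100 * 1024,  # match the part size above
                      num_threads=3)         # optional concurrent part uploads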
Example #3
    def uploadFile(self):
        filelist = self.file_name()
        if filelist:
            try:
                for file in filelist:
                    fileDir = file.split(self.dirname)[1]
                    key = (self.dirname + fileDir).replace('\\', '/')
                    total_size = os.path.getsize(file)
                    # determine_part_size determines the part size.
                    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

                    # Initialize the multipart upload.
                    upload_id = self.bucket.init_multipart_upload(key).upload_id
                    parts = []

                    # Upload parts one by one.
                    with open(file, 'rb') as fileobj:
                        part_number = 1
                        offset = 0
                        while offset < total_size:
                            num_to_upload = min(part_size, total_size - offset)
                            # SizedFileAdapter(fileobj, size) returns a new file-like object that reads at most size bytes from the current position.
                            result = self.bucket.upload_part(key, upload_id, part_number,
                                                             SizedFileAdapter(fileobj, num_to_upload))
                            parts.append(PartInfo(part_number, result.etag))
                            offset += num_to_upload
                            part_number += 1

                    # Complete the multipart upload.
                    self.bucket.complete_multipart_upload(key, upload_id, parts)
                logging.info('upload file to cloud OK')
                self.sendMail()
            except Exception as e:
                logging.info('upload file to cloud error')
                logging.error(e)
Example #4
def OssSliceUpload():
    accessKeyId = sys.argv[1]
    accessSecret = sys.argv[2]
    EndPoint = sys.argv[3]
    Bucket = sys.argv[4]
    RemoteFile = sys.argv[5]
    LocalFile = sys.argv[6]

    auth = oss2.Auth(accessKeyId, accessSecret)
    bucket = oss2.Bucket(auth, EndPoint, Bucket)

    key = RemoteFile
    filename = LocalFile
    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)
    # Initialize the multipart upload
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []
    # Upload parts one by one
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1
    # Complete the multipart upload
    bucket.complete_multipart_upload(key, upload_id, parts)
    # Verify the upload
    with open(filename, 'rb') as fileobj:
        assert bucket.get_object(key).read() == fileobj.read()
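
If a part upload fails partway through, the parts already stored keep accruing storage until the upload is aborted. A sketch of the cleanup pattern, assuming the same bucket, key and upload_id as above:

try:
    # ... upload parts as in the loop above ...
    bucket.complete_multipart_upload(key, upload_id, parts)
except oss2.exceptions.OssError:
    # Discard the unfinished upload and all parts stored so far.
    bucket.abort_multipart_upload(key, upload_id)
    raise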
Example #5
def large_file_to_oss(id, file, type_name='courses'):
    try:
        file_name = ''
        if file:
            _, file_ext = os.path.splitext(file.name)
            md = hashlib.md5()
            md.update((str(time.time()) + file.name).encode('utf8'))
            file_name = type_name + '/' + str(id) + '/' + md.hexdigest() + file_ext
            oss_file_name = os.path.join(settings.ALIYUN_OSS_DIRECTORY_PREFIX, file_name)
            bucket = aliyun_oss()
            key = oss_file_name
            filename = file
            total_size = filename.size
            part_size = determine_part_size(total_size, preferred_size=100 * 1024)
            # Initialize the multipart upload
            upload_id = bucket.init_multipart_upload(key).upload_id
            parts = []
            # Upload parts one by one
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                result = bucket.upload_part(key, upload_id, part_number,
                                            SizedFileAdapter(file, num_to_upload))
                parts.append(PartInfo(part_number, result.etag))
                offset += num_to_upload
                part_number += 1
            # Complete the multipart upload
            bucket.complete_multipart_upload(key, upload_id, parts)
            # Optional verification:
            # assert bucket.get_object(key).read() == file.read()
        return file_name
    except Exception:
        logger.error("Exceptions: {}".format(sys.exc_info()[0]))
Example #6
    def upload(self):
        psize = oss2.determine_part_size(self.__totalSize,
                                         preferred_size=self.__partSize)

        # Initialize the multipart upload
        self.__uploadId = self.__bucket.init_multipart_upload(
            self.__key).upload_id

        # Upload parts one by one
        with open(AliyunVodUtils.toUnicode(self.__fileName), 'rb') as fileObj:
            partNumber = 1
            offset = 0
            while offset < self.__totalSize:
                uploadSize = min(psize, self.__totalSize - offset)
                logger.info(
                    "UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s"
                    % (self.__fileName, self.__videoId, self.__uploadId,
                       partNumber, uploadSize))
                result = self.__bucket.upload_part(
                    self.__key, self.__uploadId, partNumber,
                    SizedFileAdapter(fileObj, uploadSize))
                self.__finishedParts.append(PartInfo(partNumber, result.etag))
                offset += uploadSize
                partNumber += 1

        # Complete the multipart upload
        self.__bucket.complete_multipart_upload(self.__key,
                                                self.__uploadId,
                                                self.__finishedParts,
                                                headers=self.__headers)

        return result
Example #7
    def upload_multiple_object(self, bucket_name, obj_name, obj_file):
        try:
            bucket = oss2.Bucket(auth, url, bucket_name)
            total_size = os.path.getsize(obj_file)
            # Use determine_part_size to determine the part size.
            part_size = determine_part_size(total_size,
                                            preferred_size=100 * 1024)
            # Initialize a multipart upload event.
            upload_id = bucket.init_multipart_upload(obj_name).upload_id
            parts = []
            # Upload parts one by one.
            with open(obj_file, 'rb') as fileobj:
                part_number = 1
                offset = 0
                while offset < total_size:
                    num_to_upload = min(part_size, total_size - offset)
                    # SizedFileAdapter(fileobj, size) returns a new file-like object that reads at most size bytes from the current position.
                    result = bucket.upload_part(
                        obj_name, upload_id, part_number,
                        SizedFileAdapter(fileobj, num_to_upload))
                    parts.append(PartInfo(part_number, result.etag))
                    offset += num_to_upload
                    part_number += 1

            # Complete multipart upload.
            bucket.complete_multipart_upload(obj_name, upload_id, parts)
            # Verify the multipart upload.
            with open(obj_file, 'rb') as fileobj:
                assert bucket.get_object(obj_name).read() == fileobj.read()
            status = "success"
        except Exception as e:
            status = str(e)
        return status
Example #8
    def upload(self):
        psize = oss2.determine_part_size(self.__totalSize, preferred_size=self.__partSize)
        
        # Initialize the multipart upload
        self.__uploadId = self.__bucket.init_multipart_upload(self.__key).upload_id

        startTime = time.time()
        expireSeconds = 2500    # The upload credential is valid for 3000 seconds; refresh ahead of expiry
        # Upload parts one by one
        with open(AliyunVodUtils.toUnicode(self.__fileName), 'rb') as fileObj:
            partNumber = 1
            offset = 0
            while offset < self.__totalSize:
                uploadSize = min(psize, self.__totalSize - offset)
                #logger.info("UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s" % (self.__fileName, self.__videoId, self.__uploadId, partNumber, uploadSize))
                result = self.__bucket.upload_part(self.__key, self.__uploadId, partNumber, SizedFileAdapter(fileObj,uploadSize))
                #print(result.request_id)
                self.__finishedParts.append(PartInfo(partNumber, result.etag))
                offset += uploadSize
                partNumber += 1

                # Progress callback
                self.__progressCallback(offset, self.__totalSize)

                # Check whether the upload credential has expired
                nowTime = time.time()
                if nowTime - startTime >= expireSeconds:
                    self.__bucket = self.__refreshAuthCallback(self.__videoId)
                    startTime = nowTime

        # Complete the multipart upload
        self.__bucket.complete_multipart_upload(self.__key, self.__uploadId, self.__finishedParts, headers=self.__headers)
        
        return result
Example #9
    def consumer(queue):
        while queue.ok():
            item = queue.get()
            if item is None:
                break

            part_no, part_data = item
            res = oss_client.upload_part(dest_file, upload_id, part_no, part_data)
            parts.append(PartInfo(part_no, res.etag))
Example #10
    def test_complete(self, do_request):
        from oss2.models import PartInfo

        parts = list()
        parts.append(PartInfo(1, '4DE8075FB607DF4D13FBC480EA488EFA'))
        parts.append(PartInfo(2, 'AF947EC157726CEA88ED83B3C989063B'))

        request_text = '''POST /pasncdoyuvuvuiyewfsobdwn?uploadId=65484B78EF3846298B8E2DC1643F8F37 HTTP/1.1
Host: ming-oss-share.oss-cn-hangzhou.aliyuncs.com
Accept-Encoding: identity
Connection: keep-alive
Content-Length: 223
date: Sat, 12 Dec 2015 00:36:26 GMT
User-Agent: aliyun-sdk-python/2.0.2(Windows/7/;3.3.3)
Accept: */*
authorization: OSS ZCDmm7TPZKHtx77j:TgjWAumJAl8dDr0yqWHOyqqwrd0=

<CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>"4DE8075FB607DF4D13FBC480EA488EFA"</ETag></Part><Part><PartNumber>2</PartNumber><ETag>"AF947EC157726CEA88ED83B3C989063B"</ETag></Part></CompleteMultipartUpload>'''

        response_text = '''HTTP/1.1 200 OK
Server: AliyunOSS
Date: Sat, 12 Dec 2015 00:36:26 GMT
Content-Type: application/xml
Content-Length: 327
Connection: keep-alive
x-oss-request-id: 566B6C0A05200A20B174994F
ETag: "1C787C506EABFB9B45EAAA8DB039F4B2-2"

<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult>
  <Location>http://ming-oss-share.oss-cn-hangzhou.aliyuncs.com/pasncdoyuvuvuiyewfsobdwn</Location>
  <Bucket>ming-oss-share</Bucket>
  <Key>pasncdoyuvuvuiyewfsobdwn</Key>
  <ETag>"1C787C506EABFB9B45EAAA8DB039F4B2-2"</ETag>
</CompleteMultipartUploadResult>'''

        req_info = unittests.common.mock_response(do_request, response_text)

        result = unittests.common.bucket().complete_multipart_upload(
            'pasncdoyuvuvuiyewfsobdwn', '65484B78EF3846298B8E2DC1643F8F37',
            parts)

        self.assertRequest(req_info, request_text)
        self.assertEqual(result.etag, '1C787C506EABFB9B45EAAA8DB039F4B2-2')
Example #11
  def consumer(queue):
    while queue.ok():
      item = queue.get()
      if item is None:
          break

      part_no, part_range = item
      object_stream = src_client.get_object(key, byte_range=part_range)
      res = dest_client.upload_part(key, upload_id, part_no, object_stream)
      parts.append(PartInfo(part_no, res.etag))
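
The producer side of this queue is not shown; it would enqueue (part_no, part_range) tuples covering the source object. A hypothetical sketch that yields such ranges, using the inclusive-end convention expected by byte_range above:

def byte_ranges(total_size, part_size):
    # Yield (part_no, (start, end)) pairs; end is inclusive.
    part_no = 1
    offset = 0
    while offset < total_size:
        n = min(part_size, total_size - offset)
        yield part_no, (offset, offset + n - 1)
        offset += n
        part_no += 1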
Example #12
    def test_upload_part(self):
        key = 'requestpayment-test-upload-part-object'
        filename = key + '.txt'

        headers = dict()
        headers[OSS_REQUEST_PAYER] = "requester"

        content = random_bytes(1024 * 1024)

        with open(filename, 'wb') as f:
            f.write(content)

        total_size = os.path.getsize(filename)
        # Set part size
        part_size = determine_part_size(total_size, preferred_size=(100*1024))

        # Init multipart without payer setting; this should fail.
        self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.init_multipart_upload, key)

        # Init multipart with payer setting; this should succeed.
        upload_id = self.payer_bucket.init_multipart_upload(key, headers=headers).upload_id
        parts = []

        # Upload part without payer setting; this should fail.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            num_to_upload = part_size
            self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.upload_part, key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload))

        # Upload part with payer setting; this should succeed.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)
                result = self.payer_bucket.upload_part(key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload), headers=headers)
                parts.append(PartInfo(part_number, result.etag))

                offset += num_to_upload
                part_number += 1

        # Complete multipart upload without payer setting; this should fail.
        self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.complete_multipart_upload, key, upload_id, parts)

        # Complete multipart upload with payer setting; this should succeed.
        result = self.payer_bucket.complete_multipart_upload(key, upload_id, parts, headers=headers)
        self.assertEqual(result.status, 200)

        self.bucket.delete_object(key)
        os.remove(filename)
Example #13
    def test_upload_part(self):
        key = 'traffic-limit-test-resumble-upload-object'
        # Create tmp file 2MB
        file_name = self._prepare_temp_file_with_size(OBJECT_SIZE_1MB * 2)

        total_size = os.path.getsize(file_name)
        # Determine the part size (1MB)
        part_size = determine_part_size(total_size,
                                        preferred_size=(1024 * 1024))

        # Initialize the multipart upload
        upload_id = self.bucket.init_multipart_upload(key).upload_id
        parts = []

        headers = dict()
        headers[OSS_TRAFFIC_LIMIT] = str(LIMIT_100KB)

        #  Upload part
        with open(file_name, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)

                # Upload part with traffic limit setting
                start_time_sec = int(time.time())
                result = self.bucket.upload_part(key,
                                                 upload_id,
                                                 part_number,
                                                 SizedFileAdapter(
                                                     fileobj, num_to_upload),
                                                 headers=headers)
                end_time_sec = int(time.time())

                # Calculate the elapsed time
                expense_time_sec = end_time_sec - start_time_sec
                # Theoretical time is 1MB / 100KB/s = 10s; allow 30% tolerance
                theoretical_expense_min = 10 * 0.7
                # The upload must take at least the minimum theoretical time
                self.assertTrue(expense_time_sec > theoretical_expense_min)

                parts.append(PartInfo(part_number, result.etag))
                offset += num_to_upload
                part_number += 1

        result = self.bucket.complete_multipart_upload(key, upload_id, parts)
        self.assertEqual(result.status, 200)
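
LIMIT_100KB and OSS_TRAFFIC_LIMIT are defined outside this excerpt. The x-oss-traffic-limit header takes a value in bits per second (the documented minimum is 819200), so a 100 KB/s cap would plausibly be defined as:

# Hypothetical constant: 100 KB/s expressed in bit/s, the unit of x-oss-traffic-limit.
LIMIT_100KB = 100 * 1024 * 8  # 819200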
Example #14
    def oss_upload(osskey, filename, uploadpath, de_suffix=True):
        try:
            # When de_suffix is True, use a regex to strip the file extension
            if de_suffix is True:
                osskey = re.match(r'\w+', filename).group()

            # A primary Alibaba Cloud account's AccessKey grants access to every API and is high-risk. It is strongly recommended to create and use a RAM account for API access and day-to-day operations; visit https://ram.console.aliyun.com to create one.
            auth = oss2.Auth(ACCESSKEYID, ACCESSKEYSECRET)
            # The endpoint here uses Hangzhou as an example; fill in the actual region.
            bucket = oss2.Bucket(auth, ENDPOINT, BUCKETNAME)

            key = uploadpath + osskey

            total_size = os.path.getsize(filename)

            # determine_part_size determines the part size.
            part_size = determine_part_size(total_size,
                                            preferred_size=10240 * 1000)

            # Initialize the multipart upload.
            upload_id = bucket.init_multipart_upload(key).upload_id
            parts = []
            # Upload parts one by one.
            with open(filename, 'rb') as fileobj:
                part_number = 1
                offset = 0

                while offset < total_size:
                    num_to_upload = min(part_size, total_size - offset)
                    # SizedFileAdapter(fileobj, size) returns a new file-like object that reads at most size bytes from the current position.
                    result = bucket.upload_part(
                        key,
                        upload_id,
                        part_number,
                        SizedFileAdapter(
                            fileobj,
                            num_to_upload,
                        ),
                    )
                    parts.append(PartInfo(part_number, result.etag))

                    offset += num_to_upload
                    part_number += 1

                # Complete the multipart upload
                bucket.complete_multipart_upload(key, upload_id, parts)

        except Exception as e:
            Print.error(e)
Example #15
    def test_upload_part_copy(self):
        src_object_name = 'requestpayment-test-upload-part-copy-src'
        dest_object_name = 'requestpayment-test-upload-part-copy-dest'
        content = b'a' * 1024 * 1024

        headers = dict()
        headers[OSS_REQUEST_PAYER] = "requester"

        self.bucket.put_object(src_object_name, content)        

        # Get src object size
        head_info = self.bucket.head_object(src_object_name)
        total_size = head_info.content_length
        self.assertEqual(total_size, 1024 * 1024)

        # Set part size
        part_size = determine_part_size(total_size, preferred_size=(100*1024))

        upload_id = self.payer_bucket.init_multipart_upload(dest_object_name, headers=headers).upload_id
        parts = []

        # Upload part copy without payer setting; this should fail.
        part_number = 1
        offset = 0
        num_to_upload = min(part_size, total_size - offset)
        end = offset + num_to_upload - 1
        self.assertRaises(oss2.exceptions.AccessDenied, self.payer_bucket.upload_part_copy, self.payer_bucket.bucket_name, 
                            src_object_name, (offset, end), dest_object_name, upload_id, part_number)

        # Upload part copy with payer setting; this should succeed.
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            end = offset + num_to_upload - 1
            result = self.payer_bucket.upload_part_copy(self.payer_bucket.bucket_name, src_object_name, (offset, end), 
                                dest_object_name, upload_id, part_number, headers=headers)

            parts.append(PartInfo(part_number, result.etag))

            offset += num_to_upload
            part_number += 1

        # Complete multipart upload with payer setting; this should succeed.
        result = self.payer_bucket.complete_multipart_upload(dest_object_name, upload_id, parts, headers=headers)

        self.bucket.delete_object(src_object_name)
        self.bucket.delete_object(dest_object_name)
Example #16
def handler(event, context):
    logger = logging.getLogger()
    evt = json.loads(event)
    logger.info("Handling event: %s", evt)
    dest_client = get_oss_client(
        context,
        evt.get("dest_oss_endpoint") or os.environ['DEST_OSS_ENDPOINT'],
        evt["dest_bucket"])

    parts = []
    for part in evt["parts"]:
        parts.append(PartInfo(part["part_no"], part["etag"]))

    dest_client.complete_multipart_upload(evt["key"], evt["upload_id"], parts)

    return {}
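
The event comes from an earlier step that uploaded the parts; the handler only relies on the keys read above. A hypothetical payload, shown as the parsed dict:

evt = {
    "dest_bucket": "example-dest-bucket",
    "dest_oss_endpoint": "oss-cn-hangzhou.aliyuncs.com",  # optional; falls back to the env var
    "key": "path/to/object",
    "upload_id": "<id returned by init_multipart_upload>",
    "parts": [
        {"part_no": 1, "etag": "<etag of part 1>"},
        {"part_no": 2, "etag": "<etag of part 2>"},
    ],
}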
Example #17
    def _pipe_file(self, path, data, chunksize=50 * 2**20, **kwargs):
        bucket, key = self.split_path(path)
        size = len(data)
        bucket = oss2.Bucket(self.auth, self.endpoint, bucket)
        if size < 5 * 2**20:
            return bucket.put_object(key, data)
        else:
            mpu = bucket.init_multipart_upload(key, **kwargs)

            out = [
                bucket.upload_part(key, mpu.upload_id, i + 1,
                                   data[off:off + chunksize])
                for i, off in enumerate(range(0, len(data), chunksize))
            ]

            parts = [PartInfo(i + 1, o.etag) for i, o in enumerate(out)]
            bucket.complete_multipart_upload(key, mpu.upload_id, parts)
        self.invalidate_cache(path)
Example #18
 def push_by_piece(self, path, name):
     path = Path(path)
     total_size = path.stat().st_size
     part_size = determine_part_size(total_size, preferred_size=1024 * 1024)
     upload_id = self.bucket.init_multipart_upload(name).upload_id
     parts = []
     with open(path, 'rb') as fileobj:
         part_number = 1
         offset = 0
         while offset < total_size:
             num_to_upload = min(part_size, total_size - offset)
             # SizedFileAdapter(fileobj, size) returns a new file-like object that reads at most size bytes from the current position.
             result = self.bucket.upload_part(name, upload_id, part_number,
                                              SizedFileAdapter(fileobj,
                                                               num_to_upload))
             parts.append(PartInfo(part_number, result.etag))
             offset += num_to_upload
             part_number += 1
     headers = {'Content-MD5': self.get_md5(path)}
     self.bucket.complete_multipart_upload(name, upload_id, parts, headers=headers)
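
get_md5 is assumed here. OSS's Content-MD5 header is the Base64 encoding of the raw MD5 digest, so a plausible implementation is:

import base64
import hashlib

def get_md5(path):
    # Hypothetical helper: stream the file so large uploads are not
    # read into memory at once.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            md5.update(chunk)
    return base64.b64encode(md5.digest()).decode('ascii')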
Example #19
def upload(oss_task_zip, zip_task_file, endpoint, bucket_name):
    dlog.debug(f"debug: upload: oss_task_zip:{oss_task_zip}; zip_task_file:{zip_task_file}")
    bucket = _get_oss_bucket(endpoint, bucket_name)
    total_size = os.path.getsize(zip_task_file)
    part_size = determine_part_size(total_size, preferred_size=1000 * 1024)
    upload_id = bucket.init_multipart_upload(oss_task_zip).upload_id
    parts = []
    with open(zip_task_file, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(oss_task_zip, upload_id, part_number, SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1
    result = bucket.complete_multipart_upload(oss_task_zip, upload_id, parts)
    return result
Example #20
def handler(event, context):
    logger = logging.getLogger()
    evt = json.loads(event)
    logger.info("Handling event: %s", evt)
    src_endpoint = 'https://oss-%s-internal.aliyuncs.com' % context.region
    dest_bucket = evt["dest_bucket"]
    dest_client = clients.get(dest_bucket)
    if dest_client is None:
        dest_client = get_oss_client(
            context,
            evt.get("dest_oss_endpoint") or os.environ.get('DEST_OSS_ENDPOINT')
            or src_endpoint, dest_bucket, evt.get("dest_access_role"))
        clients[dest_bucket] = dest_client

    parts = []
    for part in evt["parts"]:
        parts.append(PartInfo(part["part_no"], part["etag"]))

    dest_client.complete_multipart_upload(evt["key"], evt["upload_id"], parts)

    return {}
Example #21
    def _initiate_upload(self):
        if (self.autocommit and not self.append_block
                and self.tell() < self.blocksize):
            # only happens when closing a small file; use a one-shot PUT
            return
        logger.debug("Initiate upload for %s" % self)
        self.parts = []
        self.mpu = self.bucket.init_multipart_upload(self.key)

        if self.append_block:
            # use existing data in key when appending,
            # and block is big enough
            result = self.bucket.upload_part_copy(
                self.bucket_name,
                self.key,
                byte_range=None,
                target_key=self.key,
                target_upload_id=self.mpu.upload_id,
                target_part_number=1,
            )
            self.parts.append(PartInfo(1, result.etag))
Example #22
    def _save(self, name, content: File):
        # To keep behavior consistent with Django, saving a file should return a path relative to the media path.

        target_name = self._get_target_name(name)

        logger.debug('OSS storage backend: saving file %s' % target_name)

        content.open()

        # Default chunk size: 1MB
        DEFAULT_CHUNK_SIZE = 1 * 1024 * 1024

        logger.debug('OSS storage backend: read complete, file size %d' % content.size)
        if not content.multiple_chunks(chunk_size=DEFAULT_CHUNK_SIZE):
            logger.debug('OSS storage backend: uploading without chunking')
            # No chunking
            content_str = content.file.read()
            self.bucket.put_object(target_name, content_str)
        else:
            logger.debug('OSS storage backend: uploading in chunks')
            # Use multipart upload instead
            upload_id = self.bucket.init_multipart_upload(
                target_name).upload_id
            parts = []
            part_id = 1

            for chunk in content.chunks(chunk_size=DEFAULT_CHUNK_SIZE):
                # TODO: create an API endpoint for reporting upload progress
                result = self.bucket.upload_part(target_name, upload_id,
                                                 part_id, chunk)
                parts.append(PartInfo(part_id, result.etag))
                logger.debug('OSS storage backend: uploaded part #%d' % part_id)
                part_id += 1
            result = self.bucket.complete_multipart_upload(
                target_name, upload_id, parts)

        logger.debug('OSS storage backend: upload finished, closing file')
        content.close()
        return self._clean_name(name)
Example #23
def main(args):

    filename = args.upload_file
    key = os.path.split(filename)[1]
    for param in (access_key_id, access_key_secret, bucket_name, endpoint):
        assert '<' not in param, 'Please set the parameter: ' + param

    # Create a Bucket object; all object-related operations go through it
    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100000 * 1024)

    # Initialize the multipart upload
    print('OSS initializing:')
    encode_md5 = calculate_file_md5(filename)
    print("%s's md5 is %s" % (filename, encode_md5))
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []

    # Upload parts one by one
    print('Splitting the file into parts and uploading:')
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(key, upload_id, part_number,
                                        SizedFileAdapter(fileobj, num_to_upload), progress_callback=percentage)
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    # Complete the multipart upload
    print('Completing the upload task:')
    result = bucket.complete_multipart_upload(key, upload_id, parts)

    print('upload status: %d' % result.status)
    print('upload complete for file %s' % key)
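
percentage and calculate_file_md5 are assumed helpers. oss2 progress callbacks receive (consumed_bytes, total_bytes); a minimal sketch of the former:

import sys

def percentage(consumed_bytes, total_bytes):
    # total_bytes can be None when the total size is unknown.
    if total_bytes:
        rate = int(100 * float(consumed_bytes) / float(total_bytes))
        sys.stdout.write('\rUploading: {0}%'.format(rate))
        sys.stdout.flush()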
Example #24
    def _upload_chunk(self, final=False):
        bucket, key = self.fs.split_path(self.path)
        logger.debug("Upload for %s, final=%s, loc=%s, buffer loc=%s" %
                     (self, final, self.loc, self.buffer.tell()))
        if (self.autocommit and not self.append_block and final
                and self.tell() < self.blocksize):
            # only happens when closing a small file; use a one-shot PUT
            data1 = False
        else:
            self.buffer.seek(0)
            (data0, data1) = (None, self.buffer.read(self.blocksize))

        while data1:
            (data0, data1) = (data1, self.buffer.read(self.blocksize))
            data1_size = len(data1)

            if 0 < data1_size < self.blocksize:
                remainder = data0 + data1
                remainder_size = self.blocksize + data1_size

                if remainder_size <= self.part_max:
                    (data0, data1) = (remainder, None)
                else:
                    partition = remainder_size // 2
                    (data0, data1) = (remainder[:partition],
                                      remainder[partition:])

            part = len(self.parts) + 1
            logger.debug("Upload chunk %s, %s" % (self, part))

            result = self.bucket.upload_part(self.key, self.mpu.upload_id,
                                             part, data0)

            self.parts.append(PartInfo(part, result.etag))

        if self.autocommit and final:
            self.commit()
        return not final
Example #25
def upload(filename, key):
    total_size = os.path.getsize(filename)
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

    END_POINT = 'oss-cn-shanghai.aliyuncs.com'
    AUTH = Auth('<your-access-key-id>', '<your-access-key-secret>')
    BUCKET_NAME = "yunbeifeng"
    bucket = Bucket(AUTH, END_POINT, bucket_name=BUCKET_NAME)

    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                SizedFileAdapter(fileobj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

    bucket.complete_multipart_upload(key, upload_id, parts)
Example #26
result = bucket.init_multipart_upload(key, headers=headers)
sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
print('sse_algorithm:', sse_algo)
print('data_algorithm:', data_algo)
print('kms_key_id:', kms_key_id)

upload_id = result.upload_id
parts = []

# Upload parts one by one.
with open(filename, 'rb') as fileobj:
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        # SizedFileAdapter(fileobj, size) returns a new file-like object that reads at most size bytes from the current position.
        result = bucket.upload_part(key, upload_id, part_number,
                                    SizedFileAdapter(fileobj, num_to_upload))
        parts.append(PartInfo(part_number, result.etag))

        offset += num_to_upload
        part_number += 1

# Complete the multipart upload.
bucket.complete_multipart_upload(key, upload_id, parts)

# Verify the multipart upload.
with open(filename, 'rb') as fileobj:
    assert bucket.get_object(key).read() == fileobj.read()
Example #27
    def copy(self, src, dst):
        raw_src, raw_dst = str(src), str(dst)
        try:
            cloud_src = src.startswith('oss://')
            cloud_dst = dst.startswith('oss://')
            if not cloud_src and not cloud_dst:
                return super().copy(src, dst)

            if src == dst:
                return
            # download
            if cloud_src and not cloud_dst:
                target_dir, _ = os.path.split(dst)
                if not os.path.exists(target_dir):
                    os.makedirs(target_dir)
                bucket, src = self._split(src)
                obj = bucket.get_object(src)
                if obj.content_length > 100 * 1024 ** 2:  # 100M
                    with oss_progress('downloading') as callback:
                        bucket.get_object_to_file(src, dst, progress_callback=callback)
                else:
                    bucket.get_object_to_file(src, dst)
                return
            bucket, dst = self._split(dst)
            # upload
            if cloud_dst and not cloud_src:
                src_size = os.stat(src).st_size
                if src_size > 5 * 1024 ** 3:  # 5G
                    raise RuntimeError(f'A file > 5G cannot be uploaded to OSS. Please split your file first.\n{src}')
                if src_size > 100 * 1024 ** 2:  # 100M
                    with oss_progress('uploading') as callback:
                        bucket.put_object_from_file(dst, src, progress_callback=callback)
                else:
                    bucket.put_object_from_file(dst, src)
                return
            # copy between oss paths
            src_bucket, src = self._split(src)
            total_size = src_bucket.head_object(src).content_length
            if src_bucket.get_bucket_location().location != bucket.get_bucket_location().location:
                import tempfile
                local_tmp = os.path.join(tempfile.gettempdir(), src)
                self.copy(f'oss://{src_bucket.bucket_name}/{src}', local_tmp)
                self.copy(local_tmp, f'oss://{bucket.bucket_name}/{dst}')
                self.remove(local_tmp)
                return

            if total_size < 1024 ** 3 or src_bucket != bucket:  # 1GB
                bucket.copy_object(src_bucket.bucket_name, src, dst)
            else:
                # multipart copy
                from oss2.models import PartInfo
                from oss2 import determine_part_size
                part_size = determine_part_size(total_size, preferred_size=100 * 1024)
                upload_id = bucket.init_multipart_upload(dst).upload_id
                parts = []

                part_number = 1
                offset = 0
                while offset < total_size:
                    num_to_upload = min(part_size, total_size - offset)
                    byte_range = (offset, offset + num_to_upload - 1)

                    result = bucket.upload_part_copy(bucket.bucket_name, src, byte_range, dst, upload_id,
                                                     part_number)
                    parts.append(PartInfo(part_number, result.etag))

                    offset += num_to_upload
                    part_number += 1

                bucket.complete_multipart_upload(dst, upload_id, parts)
        except Exception as e:
            print("{}".format(e))
            print("Copy failed, possibly because OSS authorization is not fully open. Falling back to download-then-upload...")
            try:
                self.download(raw_src, ".easy_distill_tmp_file")
                self.upload(".easy_distill_tmp_file", raw_dst)
                print("Copying done")
            except Exception as e:
                print("{}".format(e))
Example #28
    def test_multipart_upload(self):
        auth = oss2.Auth(OSS_ID, OSS_SECRET)
        bucket_name = OSS_BUCKET + "-test-multipart-upload-data-encryption"
        bucket = oss2.Bucket(auth, self.endpoint, bucket_name)
        bucket.create_bucket()

        key = 'data-encryption-test-upload-part-object'
        filename = self._prepare_temp_file_with_size(1024 * 1024)

        headers = dict()
        headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
        headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4

        total_size = os.path.getsize(filename)
        # Set part size
        part_size = determine_part_size(total_size, preferred_size=(100*1024))

        # Init multipart with encryption headers.
        result = bucket.init_multipart_upload(key, headers=headers)
        ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
        ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
        ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
        self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
        self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
        self.assertIsNotNone(ret_kms_key_id)

        kms_key_id = ret_kms_key_id
        upload_id = result.upload_id
        parts = []

        # Uploading a part with the encryption headers should fail.
        headers = dict()
        headers[OSS_SERVER_SIDE_ENCRYPTION] = SERVER_SIDE_ENCRYPTION_KMS
        headers[OSS_SERVER_SIDE_DATA_ENCRYPTION] = KMS_DATA_ENCRYPTION_SM4
        with open(filename, 'rb') as fileobj:
            part_number = 1
            num_to_upload = part_size
            self.assertRaises(oss2.exceptions.InvalidArgument, bucket.upload_part, key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload), headers=headers)

        # Upload parts without encryption headers.
        with open(filename, 'rb') as fileobj:
            part_number = 1
            offset = 0
            while offset < total_size:
                num_to_upload = min(part_size, total_size - offset)

                result = bucket.upload_part(key, upload_id, part_number,
                                            SizedFileAdapter(fileobj, num_to_upload))

                parts.append(PartInfo(part_number, result.etag))

                offset += num_to_upload
                part_number += 1

                ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
                ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
                ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
                self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
                self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
                self.assertEqual(kms_key_id, ret_kms_key_id)

        # Completing the multipart upload with encryption headers should fail.
        self.assertRaises(oss2.exceptions.InvalidArgument, bucket.complete_multipart_upload, key, upload_id, parts, headers=headers)

        # Complete the multipart upload without encryption headers.
        result = bucket.complete_multipart_upload(key, upload_id, parts)
        self.assertEqual(result.status, 200)
        ret_sse_algo = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION)
        ret_data_algo = result.headers.get(OSS_SERVER_SIDE_DATA_ENCRYPTION)
        ret_kms_key_id = result.headers.get(OSS_SERVER_SIDE_ENCRYPTION_KEY_ID)
        self.assertEqual(SERVER_SIDE_ENCRYPTION_KMS, ret_sse_algo)
        self.assertEqual(KMS_DATA_ENCRYPTION_SM4, ret_data_algo)
        self.assertEqual(kms_key_id, ret_kms_key_id)

        bucket.delete_object(key)
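
A multipart upload that is initialized but never completed or aborted lingers on the server. oss2 ships iterators for finding and cleaning these up; a sketch, assuming bucket is an authenticated oss2.Bucket:

import oss2

# Enumerate in-progress multipart uploads, list their parts, and abort them.
for upload in oss2.MultipartUploadIterator(bucket):
    for part in oss2.PartIterator(bucket, upload.key, upload.upload_id):
        print(upload.key, part.part_number, part.etag)
    bucket.abort_multipart_upload(upload.key, upload.upload_id)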