Example #1
    def persist_file(self, path, buf, info, meta=None, headers=None):
        # First use the helper to pick a part size; here we prefer 128 KB parts.
        # buf is file-like (SizedFileAdapter reads from it), so measure it by seeking rather than calling len().
        buf.seek(0, os.SEEK_END)
        total_size = buf.tell()
        buf.seek(0)
        part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

        # Initialize the multipart upload and get an upload ID; every later call needs it.
        key = os.path.join(self.objectPath, info)
        upload_id = self.bucket.init_multipart_upload(key).upload_id

        # Upload the parts one by one.
        # oss2.SizedFileAdapter() wraps buf in a new file object whose readable
        # length equals size_to_upload.
        parts = []
        part_number = 1
        offset = 0
        while offset < total_size:
            size_to_upload = min(part_size, total_size - offset)
            result = self.bucket.upload_part(key, upload_id, part_number,
                                             oss2.SizedFileAdapter(buf, size_to_upload))
            parts.append(oss2.models.PartInfo(part_number,
                                              result.etag,
                                              size=size_to_upload,
                                              part_crc=result.crc))

            offset += size_to_upload
            part_number += 1

        # Complete the multipart upload once every part has been sent.
        self.bucket.complete_multipart_upload(key, upload_id, parts)
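
If any upload_part call raises midway, the parts already uploaded keep occupying storage until the upload is aborted. A minimal sketch of the same loop guarded by abort_multipart_upload, using the names from the example above:

upload_id = self.bucket.init_multipart_upload(key).upload_id
try:
    parts, part_number, offset = [], 1, 0
    while offset < total_size:
        size_to_upload = min(part_size, total_size - offset)
        result = self.bucket.upload_part(key, upload_id, part_number,
                                         oss2.SizedFileAdapter(buf, size_to_upload))
        parts.append(oss2.models.PartInfo(part_number, result.etag,
                                          size=size_to_upload, part_crc=result.crc))
        offset += size_to_upload
        part_number += 1
    self.bucket.complete_multipart_upload(key, upload_id, parts)
except oss2.exceptions.OssError:
    # Discard the uploaded parts so they do not linger in the bucket.
    self.bucket.abort_multipart_upload(key, upload_id)
    raise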
Example #2
def main(key, filename):
    auth = oss2.Auth('accessId', 'accessSecret')
    bucket = oss2.Bucket(auth, 'endpoint.aliyuncs.com', 'bucket')

    total_size = os.path.getsize(filename)
    part_size = oss2.determine_part_size(total_size,
                                         preferred_size=10 * 1024 * 1024)

    # init
    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []

    print('start to upload {} with id {}'.format(filename, upload_id))

    # upload
    with open(filename, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                oss2.SizedFileAdapter(fileobj, num_to_upload))
            parts.append(oss2.models.PartInfo(part_number, result.etag))

            offset += num_to_upload
            part_number += 1

    # complete
    bucket.complete_multipart_upload(key, upload_id, parts)
    print('done!')
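
For visibility on large parts, upload_part also accepts a progress_callback in current oss2 releases (worth confirming against the version you run). A sketch that could replace the upload_part call above:

def report_progress(bytes_consumed, total_bytes):
    # oss2 passes total_bytes=None when the data size is unknown.
    if total_bytes:
        print('part progress: {}/{} bytes'.format(bytes_consumed, total_bytes))

result = bucket.upload_part(key, upload_id, part_number,
                            oss2.SizedFileAdapter(fileobj, num_to_upload),
                            progress_callback=report_progress)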
Example #3
def test_upload_success(test_bucket_access, test_create_file):
    bucket = test_bucket_access
    bucket_stat = bucket.get_bucket_stat()
    count_before_upload = bucket_stat.object_count
    current_app.logger.info('storage: ' +
                            str(bucket_stat.storage_size_in_bytes))
    current_app.logger.info('object count: ' + str(bucket_stat.object_count))
    current_app.logger.info('multi part upload count: ' +
                            str(bucket_stat.multi_part_upload_count))
    filename = test_create_file
    # You can also call the multipart upload API directly.
    # First use the helper to pick a part size; here we prefer 128 KB parts.
    import os
    import oss2
    total_size = os.path.getsize(filename)
    current_app.logger.info('filesize: ' + str(total_size))
    part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)
    # Initialize the multipart upload and get an upload ID; every later call needs it.
    key = '{0}.txt'.format(random_string(10))
    upload_id = bucket.init_multipart_upload(key).upload_id

    # Upload the parts one by one.
    # oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable
    # length equals size_to_upload.
    with open(filename, 'rb') as fileobj:
        parts = []
        part_number = 1
        offset = 0
        while offset < total_size:
            size_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                key, upload_id, part_number,
                oss2.SizedFileAdapter(fileobj, size_to_upload))
            parts.append(
                oss2.models.PartInfo(part_number,
                                     result.etag,
                                     size=size_to_upload,
                                     part_crc=result.crc))

            offset += size_to_upload
            part_number += 1

        # Complete the multipart upload.
        bucket.complete_multipart_upload(key, upload_id, parts)
    count_after_upload = bucket.get_bucket_stat().object_count
    assert count_before_upload < count_after_upload
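
Before complete_multipart_upload, the test could also cross-check the server-side view of the parts via bucket.list_parts. A hedged sketch (for test-sized files a single page of results is enough):

listed = bucket.list_parts(key, upload_id)
assert sorted(p.part_number for p in listed.parts) == [p.part_number for p in parts]
assert sum(p.size for p in listed.parts) == total_size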
Example #4
    def uploadFile2OssByPart(self, file, objPath):  # multipart upload
        '''
        Upload a file object to OSS in parts.
        :param file: the file object to upload
        :param objPath: target object name (path) in OSS
        :return: the OSS save path of the uploaded object
        '''
        totalSize = file.size
        # determine_part_size picks the part size.
        partSize = oss2.determine_part_size(totalSize,
                                            preferred_size=PREFERRED_SIZE)
        uploadId = self.bucket.init_multipart_upload(objPath).upload_id
        parts = []
        # upload the parts
        executor = ThreadPoolExecutor(max_workers=1)
        allTask = []
        partNumber = 1
        offset = 0
        while offset < totalSize:
            numToUpload = min(partSize, totalSize - offset)
            print(partNumber, numToUpload)
            ## Sequential upload
            # SizedFileAdapter(file, size) wraps the file object so that each
            # call reads at most `size` bytes from its current position.
            result = self.bucket.upload_part(
                objPath, uploadId, partNumber,
                oss2.SizedFileAdapter(file, numToUpload))
            parts.append(oss2.models.PartInfo(partNumber, result.etag))
            ## Multithreaded alternative (needs an _uploadPart helper, not shown):
            # allTask.append(executor.submit(_uploadPart, partNumber, numToUpload))
            offset += numToUpload
            partNumber += 1
        # Wait for any threaded tasks, then merge their parts in order.
        wait(allTask, return_when=ALL_COMPLETED)
        resultList = [future.result() for future in as_completed(allTask)]
        for data in sorted(resultList, key=lambda x: x[0]):  # sort by part number
            partNumber, result = data[0], data[1]
            parts.append(oss2.models.PartInfo(partNumber, result.etag))

        self.bucket.complete_multipart_upload(objPath, uploadId, parts)
        return getOssSavePath(objPath)
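
The multithreaded branch above is commented out and relies on an _uploadPart helper that is not shown. A minimal self-contained sketch of concurrent part upload, assuming the source is a seekable file on disk: each worker opens its own handle, since sharing one file object across threads would race on the read position.

import os
import oss2
from concurrent.futures import ThreadPoolExecutor, as_completed

def upload_parts_concurrently(bucket, key, upload_id, path, part_size, max_workers=4):
    total_size = os.path.getsize(path)

    def upload_one(part_number, offset, size):
        # Each worker opens its own handle so seeks do not interfere.
        with open(path, 'rb') as f:
            f.seek(offset)
            result = bucket.upload_part(key, upload_id, part_number,
                                        oss2.SizedFileAdapter(f, size))
        return part_number, result.etag

    tasks = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        part_number, offset = 1, 0
        while offset < total_size:
            size = min(part_size, total_size - offset)
            tasks.append(executor.submit(upload_one, part_number, offset, size))
            offset += size
            part_number += 1
        results = [t.result() for t in as_completed(tasks)]

    # complete_multipart_upload expects parts ordered by part number.
    return [oss2.models.PartInfo(n, etag) for n, etag in sorted(results)]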
Example #5
    def _store_in_thread(self, file):
        # First use the helper to pick a part size; here we prefer 128 KB parts.
        total_size = os.path.getsize(file)
        part_size = oss2.determine_part_size(total_size,
                                             preferred_size=128 * 1024)

        # Initialize the multipart upload and get an upload ID; every later call needs it.
        key = file.replace('../', '')
        upload_id = self.bucket.init_multipart_upload(key).upload_id

        # Upload the parts one by one.
        # oss2.SizedFileAdapter() wraps fileobj in a new file object whose
        # readable length equals size_to_upload.
        with open(file, 'rb') as fileobj:
            parts = []
            part_number = 1
            offset = 0
            while offset < total_size:
                size_to_upload = min(part_size, total_size - offset)
                result = self.bucket.upload_part(
                    key, upload_id, part_number,
                    oss2.SizedFileAdapter(fileobj, size_to_upload))
                parts.append(
                    oss2.models.PartInfo(part_number,
                                         result.etag,
                                         size=size_to_upload,
                                         part_crc=result.crc))

                offset += size_to_upload
                part_number += 1

            # Complete the multipart upload.
            self.bucket.complete_multipart_upload(key, upload_id, parts)

        # Verify the upload.
        with open(file, 'rb') as fileobj:
            assert self.bucket.get_object(key).read() == fileobj.read()
Example #6
    def uploadFile(self):
        """
        Function check if cache file with uploading information is exist and try to continue uplod otherwise start new 
        file uploading to OSS
        :return: 
        """
        total_size = os.stat(self.queue_object.path).st_size
        part_size = oss2.determine_part_size(total_size, preferred_size=2 * 1024 * 1024)

        try:
            with open(self.pkl_path, 'rb') as input_file:  # try the cache file named after the upload key
                storied_object = pickle.load(input_file)  # if it opens, the previous upload did not finish
                parts = storied_object.parts
                upload_id = storied_object.upload_id

                bucket_name = storied_object.bucket_name
                endpoint = storied_object.endpoint
                bucket = oss2.Bucket(self._auth, endpoint, bucket_name)
                # offset = storied_object.offset
        except IOError:
            bucket = oss2.Bucket(self._auth, self.queue_object.endpoint, self.queue_object.bucket_name)
            upload_id = bucket.init_multipart_upload(self.queue_object.key).upload_id
            storied_object = UploadingObject()
            upload_parts = bucket.list_multipart_uploads()

            for part in upload_parts.upload_list:
                if part.upload_id == upload_id:
                    with open(self.pkl_path, 'wb') as output_file:
                        storied_object.key = self.queue_object.key
                        storied_object.bucket_name = self.queue_object.bucket_name
                        storied_object.endpoint = self.queue_object.endpoint
                        storied_object.path = self.queue_object.path
                        storied_object.upload_id = upload_id
                        storied_object.initiate_date = part.initiation_date
                        pickle.dump(storied_object, output_file, pickle.HIGHEST_PROTOCOL)

            parts = []

        with open(self.queue_object.path, 'rb') as fileobj:
            while storied_object.offset < total_size and not self.stoprequest.isSet():
                # print storied_object.part_number
                # print storied_object.parts
                num_to_upload = min(part_size, total_size - storied_object.offset)
                upload_content = oss2.SizedFileAdapter(fileobj, num_to_upload)
                try:
                    result = bucket.upload_part(self.queue_object.key,
                                                upload_id,
                                                storied_object.part_number,
                                                upload_content)
                except NoSuchUpload:
                    print "\n ==== Not finished upload not exist on  OSS bucket ===="
                    print " Clean local cache, and update uploading queue"
                    os.remove(self.pkl_path)
                    raise UploadIdNotInBucket("Upload id is not in bucket")

                # Appending directly to the class attribute did not survive pickling, hence the local list
                parts.append(oss2.models.PartInfo(storied_object.part_number, result.etag))
                storied_object.parts = parts

                if num_to_upload == part_size:
                    percentage = str(self._percentage(num_to_upload * storied_object.part_number, total_size))
                else:
                    percentage = 'Complete'

                # print percentage

                # logging.debug("File: %s => Bucket: %s - %s", key, bucket_name, percentage)
                # print "File: {0} => Bucket: {1} - {2}".format(self.queue_object.key.encode("utf-8"),
                #                                               self.queue_object.bucket_name,
                #                                               percentage)

                sys.stdout.write("\rFile: {0} => Bucket: {1} - {2}".format(self.queue_object.key,
                                                                           self.queue_object.bucket_name,
                                                                           percentage))
                sys.stdout.flush()

                storied_object.offset += num_to_upload

                storied_object.part_number += 1
                with open(self.pkl_path, 'wb') as output_file:
                    pickle.dump(storied_object, output_file, pickle.HIGHEST_PROTOCOL)

            if not self.stoprequest.isSet():
                bucket.complete_multipart_upload(self.queue_object.key, upload_id, parts)
                os.remove(self.pkl_path)
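
Much of the checkpoint bookkeeping above is also built into the SDK: oss2.resumable_upload persists upload state in a local store and picks up where it left off on the next call (parameter names per the oss2 documentation; confirm against your installed version). A sketch of the equivalent call inside uploadFile:

# Falls back to a single put_object below multipart_threshold; checkpoint
# files live in a local store, so rerunning resumes an interrupted upload.
oss2.resumable_upload(bucket, self.queue_object.key, self.queue_object.path,
                      multipart_threshold=2 * 1024 * 1024,
                      part_size=2 * 1024 * 1024,
                      num_threads=2)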
Example #7
def handler(environ, start_response):
    context = environ['fc.context']
    request_uri = environ['fc.request_uri']
    for k, v in environ.items():
        if k.startswith("HTTP_"):
            # process custom request headers
            print(k, v)
            pass

    # get request body
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        request_body_size = 0
    request_body = environ['wsgi.input'].read(request_body_size)
    request_body = urllib.parse.unquote(request_body.decode('utf-8'))

    # get request method
    request_method = environ['REQUEST_METHOD']
    if request_method != 'POST':
        status = '400 Bad Request'
        response_headers = [('Content-type', 'application/json')]
        start_response(status, response_headers)
        data = json.dumps({"error": "invalid request method."})
        return [data.encode('utf-8')]

    # print request body
    print('request_body: {}'.format(request_body))
    request_body_json = json.loads(request_body)

    creds = context.credentials
    auth = oss2.StsAuth(creds.accessKeyId, creds.accessKeySecret,
                        creds.securityToken)

    items = request_body_json.get("items")
    print("[DEBUG] items: {0}".format(items))

    # zip name
    re_code = request_body_json.get("re_code")
    is_full = request_body_json.get("full")
    uuid = request_body_json.get("uuid")
    tmpdir = '/tmp/download/'

    os.system("rm -rf /tmp/*")
    os.mkdir(tmpdir)

    # download
    for item in items:
        print("[DEBUG] item: {}".format(item))

        oss_protocol = item.get("protocol")
        oss_bucket_name = item.get("bucket")
        oss_endpoint = item.get("endpoint")
        file_path = item.get("path")
        file_original_name = item.get("original_name")

        bucket = oss2.Bucket(auth, oss_endpoint, oss_bucket_name)

        bucket.get_object_to_file(file_path, tmpdir + file_original_name)

    # zip the downloaded files
    zipname = '/tmp/' + re_code + '.zip'
    make_zip(tmpdir, zipname)

    # upload the zip archive
    total_size = os.path.getsize(zipname)
    part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

    if is_full:
        zip_path = 'full-archive/' + re_code + '/' + uuid + '.zip'
    else:
        zip_path = 'custom-archive/' + re_code + '/' + uuid + '.zip'

    # use the last bucket to upload zip package
    upload_id = bucket.init_multipart_upload(zip_path).upload_id

    with open(zipname, 'rb') as fileobj:
        parts = []
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            result = bucket.upload_part(
                zip_path, upload_id, part_number,
                oss2.SizedFileAdapter(fileobj, num_to_upload))
            parts.append(oss2.models.PartInfo(part_number, result.etag))
            offset += num_to_upload
            part_number += 1

        bucket.complete_multipart_upload(zip_path, upload_id, parts)

    zip_meta = bucket.head_object(zip_path)
    zip_content_type = zip_meta.headers.get('Content-Type')

    status = '200 OK'
    response_headers = [('Content-type', 'application/json')]
    start_response(status, response_headers)
    url = oss_protocol + "://" + oss_bucket_name + "." + oss_endpoint + "/" + zip_path
    data = json.dumps({
        "host": url,
        "protocol": oss_protocol,
        "bucket": oss_bucket_name,
        "endpoint": oss_endpoint,
        "path": zip_path,
        "original_name": re_code + ".zip",
        "type": zip_content_type,
    })
    return [data.encode('utf-8')]
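
The URL assembled above only resolves for public-read objects. For a private bucket, a presigned URL from bucket.sign_url is the usual alternative; a minimal sketch (the 3600-second expiry is an arbitrary choice):

# Presign a GET for one hour instead of returning a raw bucket URL.
signed_url = bucket.sign_url('GET', zip_path, 3600)
data = json.dumps({"url": signed_url, "path": zip_path})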
Example #8
total_size = os.path.getsize(filename)
part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)

# Initialize the multipart upload and get an upload ID; every later call needs it.
key = 'remote-multipart2.txt'
upload_id = bucket.init_multipart_upload(key).upload_id

# Upload the parts one by one.
# oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable
# length equals num_to_upload.
with open(filename, 'rb') as fileobj:
    parts = []
    part_number = 1
    offset = 0
    while offset < total_size:
        num_to_upload = min(part_size, total_size - offset)
        result = bucket.upload_part(
            key, upload_id, part_number,
            oss2.SizedFileAdapter(fileobj, num_to_upload))
        parts.append(oss2.models.PartInfo(part_number, result.etag))

        offset += num_to_upload
        part_number += 1

    # Complete the multipart upload.
    bucket.complete_multipart_upload(key, upload_id, parts)

# Verify the upload.
with open(filename, 'rb') as fileobj:
    assert bucket.get_object(key).read() == fileobj.read()

os.remove(filename)
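
One housekeeping note: uploads that were initialized but never completed or aborted keep their parts in the bucket. A hedged cleanup sketch combining list_multipart_uploads (seen in Example #6) with abort_multipart_upload:

# Abort every unfinished multipart upload (first page only; loop on
# result.is_truncated / next_key_marker for buckets with many uploads).
result = bucket.list_multipart_uploads()
for upload in result.upload_list:
    bucket.abort_multipart_upload(upload.key, upload.upload_id)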
Example #9
def _upload(part_id, offset):
    self.logger.debug('Uploading %s part %s', name, part_id)
    result = bucket.upload_part(name, upload_id, part_id,
                                oss2.SizedFileAdapter(content, multipart_chunksize))
    self.logger.debug('Done %s part %s', name, part_id)
    parts.append(oss2.models.PartInfo(part_id, result.etag,
                                      size=multipart_chunksize, part_crc=result.crc))
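
This fragment is a closure: name, bucket, upload_id, content, multipart_chunksize, parts, and self all come from the enclosing method, and reads from the shared content file object must stay sequential. A hypothetical driver under those assumptions (local_path is illustrative):

parts = []
upload_id = bucket.init_multipart_upload(name).upload_id
total_size = os.path.getsize(local_path)
with open(local_path, 'rb') as content:
    part_id, offset = 1, 0
    while offset < total_size:
        _upload(part_id, offset)  # reads the next chunk from `content`
        offset += multipart_chunksize
        part_id += 1
parts.sort(key=lambda p: p.part_number)
bucket.complete_multipart_upload(name, upload_id, parts)

Note that the fragment records size=multipart_chunksize for every part, which is only accurate when the file length is an exact multiple of the chunk size; the final part is usually shorter.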