Example #1
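# The snippets below are excerpts from a larger RGW test suite; only the
# stdlib imports shown here are certain. log, utils, manage_data, s3lib,
# TestExecError, HttpResponseParser, and TEST_DATA_PATH are assumed to be
# supplied by the surrounding framework.
import glob
import os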
def fill_container(
    rgw,
    container_name,
    user_id,
    oc,
    cc,
    size,
    multipart=False,
    split_size=0,
    header=None,
):
    swift_object_name = utils.gen_s3_object_name(
        "%s.container.%s" % (user_id, cc), oc)
    log.info("object name: %s" % swift_object_name)
    object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
    log.info("object path: %s" % object_path)
    data_info = manage_data.io_generator(object_path, size)
    if data_info is False:
        raise TestExecError("data creation failed")
    # upload the object, either as a single PUT or as multipart segments
    if multipart:
        mp_dir = os.path.join(TEST_DATA_PATH, swift_object_name + ".mp.parts")
        log.info(f"mp part dir: {mp_dir}")
        log.info("making multipart object part dir")
        mkdir = utils.exec_shell_cmd("sudo mkdir %s" % mp_dir)
        if mkdir is False:
            raise TestExecError("mkdir failed creating mp_dir_name")
        utils.split_file(object_path, split_size, mp_dir + "/")
        parts_list = sorted(glob.glob(mp_dir + "/" + "*"))
        log.info("parts_list: %s" % parts_list)
        log.info("no of parts: %s" % len(parts_list))
        for each_part in parts_list:
            log.info("trying to upload part: %s" % each_part)
            with open(each_part, "r") as fp:
                etag = rgw.put_object(
                    container_name,
                    swift_object_name + "/" + each_part,
                    contents=fp.read(),
                    content_type="text/plain",
                    headers=header,
                )
        return swift_object_name
    else:
        log.info("uploading object: %s" % object_path)
        with open(object_path, "r") as fp:
            rgw.put_object(
                container_name,
                swift_object_name,
                contents=fp.read(),
                content_type="text/plain",
                headers=header,
            )
        return swift_object_name
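# Usage sketch for fill_container (hypothetical values; `rgw` is assumed to
# be a swiftclient.Connection-like object whose put_object accepts container,
# object name, contents, content_type and headers):
#   name = fill_container(rgw, "my-container", "tester", oc=1, cc=1, size=10)
#   name = fill_container(rgw, "my-container", "tester", oc=1, cc=2, size=50,
#                         multipart=True, split_size=5)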
def upload_mutipart_object(s3_object_name,
                           bucket,
                           TEST_DATA_PATH,
                           config,
                           user_info,
                           append_data=False,
                           append_msg=None):
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = config.obj_size
    split_size = config.split_size if hasattr(config, 'split_size') else 5
    log.info('split size: %s' % split_size)
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op='append',
            **{'message': '\n%s' % append_msg})
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError('data creation failed')
    mp_dir = os.path.join(TEST_DATA_PATH, s3_object_name + '.mp.parts')
    log.info('mp part dir: %s' % mp_dir)
    log.info('making multipart object part dir')
    mkdir = utils.exec_shell_cmd('sudo mkdir %s' % mp_dir)
    if mkdir is False:
        raise TestExecError('mkdir failed creating mp_dir_name')
    utils.split_file(s3_object_path, split_size, mp_dir + "/")
    parts_list = sorted(glob.glob(mp_dir + '/' + '*'))
    log.info('parts_list: %s' % parts_list)
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict(
        {
            'access_key': user_info['access_key'],
            'upload_type': 'multipart'
        }, **data_info)
    s3_obj = s3lib.resource_op({
        'obj': bucket,
        'resource': 'Object',
        'args': [s3_object_name],
    })
    log.info('initiating multipart upload')
    mpu = s3lib.resource_op({
        'obj': s3_obj,
        'resource': 'initiate_multipart_upload',
        'args': None,
        'extra_info': upload_info
    })
    part_number = 1
    parts_info = {'Parts': []}
    log.info('no of parts: %s' % len(parts_list))
    for each_part in parts_list:
        log.info('trying to upload part: %s' % each_part)
        part = mpu.Part(part_number)
        # close the part file promptly instead of leaking the handle;
        # the equivalent direct call is part.upload(Body=fp)
        with open(each_part, mode='rb') as fp:
            part_upload_response = s3lib.resource_op({
                'obj': part,
                'resource': 'upload',
                'kwargs': dict(Body=fp),
            })
        if part_upload_response is False:
            raise TestExecError('part upload failed')
        response = HttpResponseParser(part_upload_response)
        if response.status_code == 200:
            log.info('part uploaded')
            if config.local_file_delete is True:
                log.info('deleting local file part')
                utils.exec_shell_cmd('sudo rm -rf %s' % each_part)
        else:
            raise TestExecError("part uploading failed")
        part_info = {
            'PartNumber': part_number,
            'ETag': part_upload_response['ETag']
        }
        parts_info['Parts'].append(part_info)
        if each_part != parts_list[-1]:
            # increase the part number only if the current part is not the last part
            part_number += 1
        log.info('curr part_number: %s' % part_number)
    # log.info('parts_info so far: %s'% parts_info)
    # part_number is not bumped for the last part, so on full success it
    # equals len(parts_list)
    if len(parts_list) == part_number:
        log.info('all parts upload completed')
        mpu.complete(MultipartUpload=parts_info)
        log.info('multipart upload complete for key: %s' % s3_object_name)
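# Usage sketch (hypothetical names; `bucket` is assumed to be a boto3 Bucket
# resource and `config` an object exposing obj_size, split_size and
# local_file_delete). The parts_info dict passed to mpu.complete has the
# standard boto3 shape: {'Parts': [{'PartNumber': 1, 'ETag': '"..."'}, ...]}
#   upload_mutipart_object('key1', bucket, TEST_DATA_PATH, config, user_info)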
Example #3
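# As in Example #1, only the stdlib imports are certain; the remaining names
# (log, utils, manage_data, s3lib, TestExecError, HttpResponseParser,
# TEST_DATA_PATH) are assumed to come from the surrounding framework.
import glob
import os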
def upload_mutipart_object(
    s3_object_name,
    bucket,
    TEST_DATA_PATH,
    config,
    user_info,
    append_data=False,
    append_msg=None,
):
    log.info("s3 object name: %s" % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info("s3 object path: %s" % s3_object_path)
    s3_object_size = config.obj_size
    split_size = config.split_size if hasattr(config, "split_size") else 5
    log.info("split size: %s" % split_size)
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op="append",
            **{"message": "\n%s" % append_msg},
        )
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    mp_dir = os.path.join(TEST_DATA_PATH, s3_object_name + ".mp.parts")
    log.info("mp part dir: %s" % mp_dir)
    log.info("making multipart object part dir")
    mkdir = utils.exec_shell_cmd("sudo mkdir %s" % mp_dir)
    if mkdir is False:
        raise TestExecError("mkdir failed creating mp_dir_name")
    utils.split_file(s3_object_path, split_size, mp_dir + "/")
    parts_list = sorted(glob.glob(mp_dir + "/" + "*"))
    log.info("parts_list: %s" % parts_list)
    log.info("uploading s3 object: %s" % s3_object_path)
    upload_info = dict(
        {"access_key": user_info["access_key"], "upload_type": "multipart"},
        **data_info,
    )
    s3_obj = s3lib.resource_op({
        "obj": bucket,
        "resource": "Object",
        "args": [s3_object_name],
    })
    log.info("initiating multipart upload")
    mpu = s3lib.resource_op({
        "obj": s3_obj,
        "resource": "initiate_multipart_upload",
        "args": None,
        "extra_info": upload_info,
    })
    part_number = 1
    parts_info = {"Parts": []}
    log.info("no of parts: %s" % len(parts_list))
    for each_part in parts_list:
        log.info("trying to upload part: %s" % each_part)
        part = mpu.Part(part_number)
        # close the part file promptly instead of leaking the handle;
        # the equivalent direct call is part.upload(Body=fp)
        with open(each_part, mode="rb") as fp:
            part_upload_response = s3lib.resource_op({
                "obj": part,
                "resource": "upload",
                "kwargs": dict(Body=fp),
            })
        if part_upload_response is False:
            raise TestExecError("part upload failed")
        response = HttpResponseParser(part_upload_response)
        if response.status_code == 200:
            log.info("part uploaded")
            if config.local_file_delete is True:
                log.info("deleting local file part")
                utils.exec_shell_cmd("sudo rm -rf %s" % each_part)
        else:
            raise TestExecError("part uploading failed")
        part_info = {
            "PartNumber": part_number,
            "ETag": part_upload_response["ETag"]
        }
        parts_info["Parts"].append(part_info)
        if each_part != parts_list[-1]:
            # increase the part number only if the current part is not the last part
            part_number += 1
        log.info("curr part_number: %s" % part_number)
    # log.info('parts_info so far: %s'% parts_info)
    # part_number is not bumped for the last part, so on full success it
    # equals len(parts_list)
    if len(parts_list) == part_number:
        log.info("all parts upload completed")
        mpu.complete(MultipartUpload=parts_info)
        log.info("multipart upload complete for key: %s" % s3_object_name)