Example #1
def put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config):
    bucket_life_cycle = s3lib.resource_op({'obj': rgw_conn,
                                           'resource': 'BucketLifecycleConfiguration',
                                           'args': [bucket.name]})
    put_bucket_life_cycle = s3lib.resource_op({"obj": bucket_life_cycle,
                                               "resource": "put",
                                               "kwargs": dict(LifecycleConfiguration=life_cycle_rule)})
    log.info('put bucket life cycle:\n%s' % put_bucket_life_cycle)
    if put_bucket_life_cycle is False:
        raise TestExecError("Resource execution failed: put bucket lifecycle failed")
    if put_bucket_life_cycle is not None:
        response = HttpResponseParser(put_bucket_life_cycle)
        if response.status_code == 200:
            log.info('bucket life cycle added')
        else:
            raise TestExecError("bucket lifecycle addition failed")
    log.info('trying to retrieve bucket lifecycle config')
    get_bucket_life_cycle_config = s3lib.resource_op({"obj": rgw_conn2,
                                                      "resource": 'get_bucket_lifecycle_configuration',
                                                      "kwargs": dict(Bucket=bucket.name)
                                                      })
    if get_bucket_life_cycle_config is False:
        raise TestExecError("bucket lifecycle config retrieval failed")
    if get_bucket_life_cycle_config is not None:
        response = HttpResponseParser(get_bucket_life_cycle_config)
        if response.status_code == 200:
            log.info('bucket life cycle retrieved')
        else:
            raise TestExecError("bucket lifecycle config retrieval failed")
    else:
        raise TestExecError("bucket life cycle retrieved")
    objs_total = (config.test_ops['version_count']) * (config.objects_count)
    for rule in config.lifecycle_conf:
        if rule.get('Expiration', {}).get('Date', False):
            # todo: need to get the interval value from yaml file
            log.info("wait for 60 seconds")
            time.sleep(60)
        else:
            for time_interval in range(19):
                bucket_stats_op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket=%s" % bucket.name)
                json_doc1 = json.loads(bucket_stats_op)
                obj_pre_lc = json_doc1['usage']['rgw.main']['num_objects']
                if obj_pre_lc == objs_total:
                    time.sleep(30)
                else:
                    raise TestExecError("Objects expired before the expected days")
            time.sleep(60)

    log.info('testing if lc is applied via the radosgw-admin cli')
    op = utils.exec_shell_cmd("radosgw-admin lc list")
    json_doc = json.loads(op)
    for i, entry in enumerate(json_doc):
        print(i)
        print(entry['status'])
        if entry['status'] in ('COMPLETE', 'PROCESSING'):
            log.info('LC is applied on the bucket')
        else:
            log.info('LC is not applied')
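For reference, the life_cycle_rule argument follows boto3's LifecycleConfiguration shape. A minimal sketch of such a rule and a call into the helper above, assuming bucket, rgw_conn, rgw_conn2 and config already exist as in the example (the rule ID, prefix, and day count are illustrative, not taken from the suite's yaml):

sample_life_cycle_rule = {
    'Rules': [
        {
            'ID': 'expire-prefix-rule',          # illustrative rule name
            'Filter': {'Prefix': 'key_prefix'},  # apply to keys under this prefix
            'Status': 'Enabled',
            'Expiration': {'Days': 1},           # expire current versions after one day
        }
    ]
}
put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, sample_life_cycle_rule, config)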
Example #2
def put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                  life_cycle_rule):
    bucket_life_cycle = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'BucketLifecycleConfiguration',
        'args': [bucket.name]
    })
    put_bucket_life_cycle = s3lib.resource_op({
        "obj": bucket_life_cycle,
        "resource": "put",
        "kwargs": dict(LifecycleConfiguration=life_cycle_rule)
    })
    log.info('put bucket life cycle:\n%s' % put_bucket_life_cycle)
    if put_bucket_life_cycle is False:
        raise TestExecError(
            "Resource execution failed: put bucket lifecycle failed")
    if put_bucket_life_cycle is not None:
        response = HttpResponseParser(put_bucket_life_cycle)
        if response.status_code == 200:
            log.info('bucket life cycle added')
        else:
            raise TestExecError("bucket lifecycle addition failed")
    log.info('trying to retrieve bucket lifecycle config')
    get_bucket_life_cycle_config = s3lib.resource_op({
        "obj": rgw_conn2,
        "resource": 'get_bucket_lifecycle_configuration',
        "kwargs": dict(Bucket=bucket.name)
    })
    if get_bucket_life_cycle_config is False:
        raise TestExecError("bucket lifecycle config retrieval failed")
    if get_bucket_life_cycle_config is not None:
        response = HttpResponseParser(get_bucket_life_cycle_config)
        if response.status_code == 200:
            log.info('bucket life cycle retrieved')
        else:
            raise TestExecError("bucket lifecycle config retrieval failed")
    else:
        raise TestExecError("bucket life cycle retrieved")
    time.sleep(100)
    log.info('testing if lc is applied via the radosgw-admin cli')
    op = utils.exec_shell_cmd("radosgw-admin lc list")
    json_doc = json.loads(op)
    for i, entry in enumerate(json_doc):
        print(i)
        print(entry['status'])
        if entry['status'] != 'COMPLETE':
            log.info('LC is not completed, failed')
        else:
            log.info('LC is completed')
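Every example here funnels boto3 calls through s3lib.resource_op, whose implementation is not included in these excerpts. A minimal sketch consistent with the call sites (a dict carrying 'obj', 'resource', optional 'args'/'kwargs', and 'extra_info' for IO bookkeeping) might look like this hypothetical reconstruction:

def resource_op(op_info):
    """Hypothetical reconstruction: resolve `resource` as an attribute of
    `obj`, call it if callable, and return False on any exception -- which
    is why callers test `is False` and `is not None`."""
    obj = op_info['obj']
    resource = op_info['resource']
    args = op_info.get('args') or []
    kwargs = op_info.get('kwargs') or {}
    try:
        attr = getattr(obj, resource)
        # non-callable attributes (e.g. 'objects', 'status') are returned as-is
        return attr(*args, **kwargs) if callable(attr) else attr
    except Exception as e:
        log.error('resource execution failed: %s' % e)
        return False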
def test_acls_private(u1_rgw_conn, u1, u2_rgw_conn, u1_bucket, u2_bucket):
    # test for acl: private
    s3_ops = ResourceOps()
    u1_bucket_acl = s3_ops.resource_op(u1_rgw_conn, 'BucketAcl',
                                       u1_bucket.name)
    log.info('setting bucket acl: %s' % ACLS[0])
    u1_bucket_acl.put(ACL=ACLS[0])
    # access bucket_info of u1_bucket from u2
    log.info('u1 bucket info')
    u1_bucket_info = s3_ops.resource_op(u1_rgw_conn, 'Bucket', u1_bucket.name)
    log.info(u1_bucket_info.name)
    log.info(u1_bucket_info.creation_date)
    log.info(u1_bucket_info.load())
    log.info(
        'trying to access u1 bucket info from u2 after setting u1 bucket acls to private'
    )
    access_u1_bucket_from_u2 = s3_ops.resource_op(u2_rgw_conn, 'Bucket',
                                                  u1_bucket.name)
    log.info('trying to delete u1_bucket from u2')
    u1_bucket_deleted_response = access_u1_bucket_from_u2.delete()
    try:
        response = HttpResponseParser(u1_bucket_deleted_response)
    except Exception as e:
        log.info('delete failed as expected, no permission on u1_bucket: %s' % e)
    else:
        raise TestExecError(
            "bucket access should be restricted, but delete excuted")
def delete_objects(bucket):
    """
    deletes the objects in a given bucket
    :param bucket: S3Bucket object
    """
    log.info("listing all objects in bucket: %s" % bucket.name)
    objects = s3lib.resource_op({"obj": bucket, "resource": "objects", "args": None})
    log.info("objects :%s" % objects)
    all_objects = s3lib.resource_op({"obj": objects, "resource": "all", "args": None})
    log.info("all objects: %s" % all_objects)
    for obj in all_objects:
        log.info("object_name: %s" % obj.key)
    log.info("deleting all objects in bucket")
    objects_deleted = s3lib.resource_op(
        {"obj": objects, "resource": "delete", "args": None}
    )
    log.info("objects_deleted: %s" % objects_deleted)
    if objects_deleted is False:
        raise TestExecError("Resource execution failed: Object deletion failed")
    if objects_deleted is not None:
        response = HttpResponseParser(objects_deleted[0])
        if response.status_code == 200:
            log.info("objects deleted ")
        else:
            raise TestExecError("objects deletion failed")
    else:
        raise TestExecError("objects deletion failed")
Example #5
def create_bucket(rgw_conn, user_info, rand_no=0):
    s3_ops = ResourceOps()
    bucket_name_to_create = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no)
    log.info('creating bucket with name: %s' % bucket_name_to_create)
    bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': user_info['access_key']})
    if created is False:
        raise TestExecError("Resource execution failed: bucket creation failed")
    if created is not None:
        response = HttpResponseParser(created)
        if response.status_code == 200:
            log.info('bucket created')
        else:
            raise TestExecError("bucket creation failed")
    else:
        raise TestExecError("bucket creation failed")
    return bucket
def test_acls_public_write(u1_rgw_conn, u1, u2_rgw_conn, u1_bucket, u2_bucket):
    # test for acl: public-read-write
    s3_ops = ResourceOps()
    u1_bucket_acl = s3_ops.resource_op(u1_rgw_conn, 'BucketAcl',
                                       u1_bucket.name)
    log.info('setting bucket acl: %s' % ACLS[2])
    u1_bucket_acl.put(ACL=ACLS[2])
    # access bucket_info of u1_bucket from u2
    log.info('u1 bucket info')
    u1_bucket_info = s3_ops.resource_op(u1_rgw_conn, 'Bucket', u1_bucket.name)
    log.info(u1_bucket_info.name)
    log.info(u1_bucket_info.creation_date)
    log.info(u1_bucket_info.load())
    log.info(
        'trying to access u1 bucket info from u2 after setting u1 bucket acls to public-read-write'
    )
    access_u1_bucket_from_u2 = s3_ops.resource_op(u2_rgw_conn, 'Bucket',
                                                  u1_bucket.name)
    log.info('trying to delete u1_bucket from u2')
    u1_bucket_deleted_response = access_u1_bucket_from_u2.delete()
    response = HttpResponseParser(u1_bucket_deleted_response)
    if response.status_code == 204:
        log.info('u1 bucket deleted from u2')
    else:
        log.info('error in bucket deletion')
        raise TestExecError("error in bucket deletion")
def enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info):
    log.info('bucket versioning test on bucket: %s' % bucket.name)
    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
    bucket_versioning = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'BucketVersioning',
        'args': [bucket.name]
    })
    # checking the versioning status
    # version_status = s3_ops.resource_op(bucket_versioning, 'status')
    version_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'status',
        'args': None
    })
    if version_status is None:
        log.info('bucket versioning still not enabled')
    # enabling bucket versioning
    # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
    version_enable_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'enable',
        'args': None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info('version enabled')
        write_bucket_io_info.add_versioning_status(user_info['access_key'],
                                                   bucket.name, 'enabled')
    else:
        raise TestExecError("version enable failed")
Example #8
def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    # create buckets
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    bucket_versioning = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'BucketVersioning',
        'args': [bucket.name]
    })
    # checking the versioning status
    version_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'status',
        'args': None
    })
    if version_status is None:
        log.info('bucket versioning still not enabled')
    # enabling bucket versioning
    version_enable_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'enable',
        'args': None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info('version enabled')
    else:
        raise TestExecError("version enable failed")
    return bucket
def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    # create buckets
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    bucket_versioning = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "BucketVersioning",
        "args": [bucket.name]
    })
    # checking the versioning status
    version_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "status",
        "args": None
    })
    if version_status is None:
        log.info("bucket versioning still not enabled")
    # enabling bucket versioning
    version_enable_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "enable",
        "args": None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info("version enabled")
    else:
        raise TestExecError("version enable failed")
    return bucket
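For comparison, the same enable-versioning flow written directly against the boto3 resource API, without the s3lib wrapper (rgw_conn is assumed to be a boto3 S3 service resource, as in the examples above):

bucket_versioning = rgw_conn.BucketVersioning(bucket.name)
log.info('versioning status: %s' % bucket_versioning.status)  # None until first configured
bucket_versioning.enable()   # PUT ?versioning with Status=Enabled
bucket_versioning.reload()   # refresh the cached status attribute
assert bucket_versioning.status == 'Enabled'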
Example #10
def create_bucket(bucket_name, rgw, user_info):
    log.info("creating bucket with name: %s" % bucket_name)
    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    bucket = s3lib.resource_op({
        "obj": rgw,
        "resource": "Bucket",
        "args": [bucket_name]
    })
    created = s3lib.resource_op({
        "obj": bucket,
        "resource": "create",
        "args": None,
        "extra_info": {
            "access_key": user_info["access_key"]
        },
    })
    if created is False:
        raise TestExecError(
            "Resource execution failed: bucket creation failed")
    if created is not None:
        response = HttpResponseParser(created)
        if response.status_code == 200:
            log.info("bucket created")
        else:
            raise TestExecError("bucket creation failed")
    else:
        raise TestExecError("bucket creation failed")
    return bucket
Example #11
def delete_bucket(bucket):
    """
    deletes a given bucket
    :param bucket: s3Bucket object
    """
    log.info("listing objects if any")
    objs = bucket.objects.all()
    count = sum(1 for _ in bucket.objects.all())
    if count > 0:
        log.info(f"objects not deleted, count is:{count}")
        for ob in objs:
            log.info(f"object: {ob.key}")

    log.info("deleting bucket: %s" % bucket.name)
    bucket_deleted_response = s3lib.resource_op({
        "obj": bucket,
        "resource": "delete",
        "args": None
    })
    log.info("bucket_deleted_status: %s" % bucket_deleted_response)
    if bucket_deleted_response is not None and isinstance(
            bucket_deleted_response, dict):
        response = HttpResponseParser(bucket_deleted_response)
        if response.status_code == 204:
            log.info("bucket deleted ")
        else:
            raise TestExecError("bucket deletion failed")
    else:
        raise TestExecError("bucket deletion failed")
Example #12
def enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info):
    log.info("bucket versioning test on bucket: %s" % bucket.name)
    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
    bucket_versioning = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "BucketVersioning",
        "args": [bucket.name]
    })
    # checking the versioning status
    # version_status = s3_ops.resource_op(bucket_versioning, 'status')
    version_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "status",
        "args": None
    })
    if version_status is None:
        log.info("bucket versioning still not enabled")
    # enabling bucket versioning
    # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
    version_enable_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "enable",
        "args": None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info("version enabled")
        write_bucket_io_info.add_versioning_status(user_info["access_key"],
                                                   bucket.name, "enabled")
    else:
        raise TestExecError("version enable failed")
Example #13
def delete_bucket(bucket):
    """
    deletes a given bucket
    :param bucket: s3Bucket object
    """
    log.info('listing objects if any')
    objs = bucket.objects.all()
    count = sum(1 for _ in bucket.objects.all())
    if count > 0:
        log.info(f'objects not deleted, count is:{count}')
        for ob in objs:
            log.info(f'object: {ob.key}')

    log.info('deleting bucket: %s' % bucket.name)
    bucket_deleted_response = s3lib.resource_op({
        'obj': bucket,
        'resource': 'delete',
        'args': None
    })
    log.info('bucket_deleted_status: %s' % bucket_deleted_response)
    if bucket_deleted_response is not None and isinstance(
            bucket_deleted_response, dict):
        response = HttpResponseParser(bucket_deleted_response)
        if response.status_code == 204:
            log.info('bucket deleted ')
        else:
            raise TestExecError("bucket deletion failed")
    else:
        raise TestExecError("bucket deletion failed")
Example #14
def create_bucket(bucket_name, rgw, user_info):
    log.info('creating bucket with name: %s' % bucket_name)
    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    bucket = s3lib.resource_op({
        'obj': rgw,
        'resource': 'Bucket',
        'args': [bucket_name]
    })
    created = s3lib.resource_op({
        'obj': bucket,
        'resource': 'create',
        'args': None,
        'extra_info': {
            'access_key': user_info['access_key']
        }
    })
    if created is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if created is not None:
        response = HttpResponseParser(created)
        if response.status_code == 200:
            log.info('bucket created')
        else:
            raise TestExecError("bucket creation failed")
    else:
        raise TestExecError("bucket creation failed")
    return bucket
Example #15
def delete_objects(bucket):
    """
    deletes the objects in a given bucket
    :param bucket: S3Bucket object
    """
    log.info('listing all objects in bucket: %s' % bucket.name)
    objects = s3lib.resource_op({'obj': bucket,
                                 'resource': 'objects',
                                 'args': None})
    log.info('objects :%s' % objects)
    all_objects = s3lib.resource_op({'obj': objects,
                                     'resource': 'all',
                                     'args': None})
    log.info('all objects: %s' % all_objects)
    for obj in all_objects:
        log.info('object_name: %s' % obj.key)
    log.info('deleting all objects in bucket')
    objects_deleted = s3lib.resource_op({'obj': objects,
                                         'resource': 'delete',
                                         'args': None})
    log.info('objects_deleted: %s' % objects_deleted)
    if objects_deleted is False:
        raise TestExecError('Resource execution failed: Object deletion failed')
    if objects_deleted is not None:
        response = HttpResponseParser(objects_deleted[0])
        if response.status_code == 200:
            log.info('objects deleted ')
        else:
            raise TestExecError("objects deletion failed")
    else:
        raise TestExecError("objects deletion failed")
Example #16
def put_bucket_lifecycle(bucket, rgw_conn, rgw_conn2, life_cycle_rule):
    """
    Set/Put lifecycle to provided bucket
    """
    bucket_life_cycle = s3lib.resource_op(
        {
            "obj": rgw_conn,
            "resource": "BucketLifecycleConfiguration",
            "args": [bucket.name],
        }
    )
    put_bucket_life_cycle = s3lib.resource_op(
        {
            "obj": bucket_life_cycle,
            "resource": "put",
            "kwargs": dict(LifecycleConfiguration=life_cycle_rule),
        }
    )
    log.info(f"put bucket life cycle:\n{put_bucket_life_cycle}")
    if not put_bucket_life_cycle:
        raise TestExecError("Resource execution failed: put bucket lifecycle failed")
    if put_bucket_life_cycle:
        response = HttpResponseParser(put_bucket_life_cycle)
        if response.status_code == 200:
            log.info("bucket life cycle added")
        else:
            raise TestExecError("bucket lifecycle addition failed")
    log.info("trying to retrieve bucket lifecycle config")
    get_bucket_life_cycle_config = s3lib.resource_op(
        {
            "obj": rgw_conn2,
            "resource": "get_bucket_lifecycle_configuration",
            "kwargs": dict(Bucket=bucket.name),
        }
    )
    if not get_bucket_life_cycle_config:
        raise TestExecError("bucket lifecycle config retrieval failed")
    if get_bucket_life_cycle_config:
        response = HttpResponseParser(get_bucket_life_cycle_config)
        if response.status_code == 200:
            log.info("bucket life cycle retrieved")
        else:
            raise TestExecError("bucket lifecycle config retrieval failed")
    else:
        raise TestExecError("bucket life cycle retrieved")
    lc_data = json.loads(utils.exec_shell_cmd("radosgw-admin lc list"))
    log.info(f"lc data is {lc_data}")
def test_exec(config, requester):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    log.info('requester type: %s' % requester)

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        # create buckets
        log.info('no of buckets to create: %s' % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                each_user['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = resuables.create_bucket(bucket_name=bucket_name_to_create,
                                             rgw=rgw_conn,
                                             user_info=each_user)
            bucket_request_payer = s3lib.resource_op({
                'obj': rgw_conn,
                'resource': 'BucketRequestPayment',
                'args': [bucket.name]
            })
            # change the bucket request payer to 'requester'
            payer = {'Payer': requester}
            response = s3lib.resource_op({
                'obj': bucket_request_payer,
                'resource': 'put',
                'kwargs': dict(RequestPaymentConfiguration=payer)
            })
            log.info(response)
            if response is not None:
                response = HttpResponseParser(response)
                if response.status_code == 200:
                    log.info('bucket request payer modified')
                else:
                    raise TestExecError(
                        "bucket request payer modification failed")
            else:
                raise TestExecError("bucket request payer modification failed")
            payer = bucket_request_payer.payer
            log.info('bucket request payer: %s' % payer)
            if payer != 'Requester':
                raise TestExecError('Request payer is not set or changed properly')
            log.info('s3 objects to create: %s' % config.objects_count)
            if config.objects_count is not None:
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    resuables.upload_object(s3_object_name, bucket,
                                            TEST_DATA_PATH, config, each_user)
Example #18
def test_acls_public_read(u1_rgw_conn, u1, u2_rgw_conn, u1_bucket, u2_bucket):
    # test for public_read
    s3_ops = ResourceOps()
    u1_bucket_acl = s3_ops.resource_op(u1_rgw_conn, 'BucketAcl',
                                       u1_bucket.name)
    log.info('setting bucket acl: %s' % ACLS[1])
    u1_bucket_acl.put(ACL=ACLS[1])
    # access bucket_info of u1_bucket from u2
    log.info('u1 bucket info')
    u1_bucket_info = s3_ops.resource_op(u1_rgw_conn, 'Bucket', u1_bucket.name)
    log.info(u1_bucket_info.name)
    log.info(u1_bucket_info.creation_date)
    log.info(u1_bucket_info.load())
    s3_object_name = utils.gen_s3_object_name(u1_bucket.name, rand_no=0)
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                         config.objects_size_range['max'])
    data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        TestExecError("data creation failed")
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict({'access_key': u1['access_key']}, **data_info)
    object_uploaded_status = s3_ops.resource_op(u1_bucket, 'upload_file',
                                                s3_object_path, s3_object_name,
                                                **upload_info)
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info('object uploaded')
    log.info(
        'trying to access u1 bucket and its objects info from u2 after setting u1 bucket acls to public read'
    )
    access_u1_bucket_from_u2 = s3_ops.resource_op(u2_rgw_conn, 'Bucket',
                                                  u1_bucket.name)
    try:
        all_objects = access_u1_bucket_from_u2.objects.all()
        for obj in all_objects:
            log.info('obj name: %s' % obj.key)
    except Exception as e:
        msg = 'access given to read, but still failing to read'
        raise TestExecError(msg)
    log.info('trying to delete u1_bucket from u2')
    try:
        u1_bucket_deleted_response = access_u1_bucket_from_u2.delete()
        response = HttpResponseParser(u1_bucket_deleted_response)
        log.info(response)
    except Exception as e:
        msg = 'access not given to write, hence failing'
        log.info(msg)
    else:
        raise TestExecError("acces not given, but still bucket got deleted")
Example #19
def delete_version_object(
    bucket,
    s3_object_name,
    s3_object_path,
    rgw_conn,
    user_info,
):
    """
    deletes single object and its versions
    :param bucket: S3bucket object
    :param s3_object_name: s3 object name
    :param s3_object_path: path of the object created in the client
    :param rgw_conn: rgw connection
    :param user_info: user info dict containing access_key, secret_key and user_id
    """
    versions = bucket.object_versions.filter(Prefix=s3_object_name)
    log.info("deleting s3_obj keys and its versions")
    s3_obj = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "Object",
        "args": [bucket.name, s3_object_name]
    })
    log.info("deleting versions for s3 obj: %s" % s3_object_name)
    for version in versions:
        log.info("trying to delete obj version: %s" % version.version_id)
        del_obj_version = s3lib.resource_op({
            "obj": s3_obj,
            "resource": "delete",
            "kwargs": dict(VersionId=version.version_id),
        })
        log.info("response:\n%s" % del_obj_version)
        if del_obj_version is not None:
            response = HttpResponseParser(del_obj_version)
            if response.status_code == 204:
                log.info("version deleted ")
                write_key_io_info.delete_version_info(
                    user_info["access_key"],
                    bucket.name,
                    s3_object_path,
                    version.version_id,
                )
            else:
                raise TestExecError("version  deletion failed")
        else:
            raise TestExecError("version deletion failed")
    log.info("available versions for the object")
    versions = bucket.object_versions.filter(Prefix=s3_object_name)
    for version in versions:
        log.info("key_name: %s --> version_id: %s" %
                 (version.object_key, version.version_id))
Example #20
def delete_version_object(
    bucket,
    s3_object_name,
    s3_object_path,
    rgw_conn,
    user_info,
):
    """
    deletes single object and its versions
    :param bucket: S3bucket object
    :param s3_object_name: s3 object name
    :param s3_object_path: path of the object created in the client
    :param rgw_conn: rgw connection
    :param user_info: user info dict containing access_key, secret_key and user_id

    """
    versions = bucket.object_versions.filter(Prefix=s3_object_name)
    log.info('deleting s3_obj keys and its versions')
    s3_obj = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'Object',
        'args': [bucket.name, s3_object_name]
    })
    log.info('deleting versions for s3 obj: %s' % s3_object_name)
    for version in versions:
        log.info('trying to delete obj version: %s' % version.version_id)
        del_obj_version = s3lib.resource_op({
            'obj': s3_obj,
            'resource': 'delete',
            'kwargs': dict(VersionId=version.version_id)
        })
        log.info('response:\n%s' % del_obj_version)
        if del_obj_version is not None:
            response = HttpResponseParser(del_obj_version)
            if response.status_code == 204:
                log.info('version deleted ')
                write_key_io_info.delete_version_info(user_info['access_key'],
                                                      bucket.name,
                                                      s3_object_path,
                                                      version.version_id)
            else:
                raise TestExecError("version  deletion failed")
        else:
            raise TestExecError("version deletion failed")
    log.info('available versions for the object')
    versions = bucket.object_versions.filter(Prefix=s3_object_name)
    for version in versions:
        log.info('key_name: %s --> version_id: %s' %
                 (version.object_key, version.version_id))
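The per-version delete loop above also has a compact boto3-resource equivalent; each ObjectVersion can delete itself (the IO-info bookkeeping is omitted in this sketch):

for version in bucket.object_versions.filter(Prefix=s3_object_name):
    version.delete()  # issues DELETE with ?versionId=<version id>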
Example #21
def enable_mfa_versioning(bucket, rgw_conn, SEED, serial, user_info,
                          write_bucket_io_info):
    log.info("bucket MFA and versioning test on bucket: %s" % bucket.name)
    bucket_versioning = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "BucketVersioning",
        "args": [bucket.name]
    })
    # checking the versioning status
    version_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "status",
        "args": None
    })
    if version_status is None:
        log.info("bucket mfa and versioning still not enabled")

    # generate MFA token to authenticate
    token = generate_totp(SEED)
    mfa_token = serial + " " + token

    # put mfa and bucket versioning
    mfa_version_put = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "put",
        "kwargs": dict(
            MFA=mfa_token,
            VersioningConfiguration={
                "MFADelete": "Enabled",
                "Status": "Enabled"
            },
            ExpectedBucketOwner=user_info["user_id"],
        ),
    })

    if mfa_version_put is False:
        return token, mfa_version_put

    response = HttpResponseParser(mfa_version_put)
    if response.status_code == 200:
        log.info("MFA and version enabled")
    else:
        raise MFAVersionError("bucket mfa and versioning enable failed")
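generate_totp is not shown in these excerpts. If it follows standard TOTP, a minimal stand-in built on pyotp would be (assuming SEED is a base32 secret; pyotp is an assumption, not necessarily the suite's actual dependency):

import pyotp

def generate_totp(seed):
    """Sketch: derive the current 6-digit TOTP code from a base32 seed."""
    return pyotp.TOTP(seed).now()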
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    extra_user = s3lib.create_users(1)[0]
    extra_user_auth = Auth(extra_user, ssl=config.ssl)
    extra_user_conn = extra_user_auth.do_auth()
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        s3_object_names = []
        # create buckets
        log.info('no of buckets to create: %s' % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = s3lib.resource_op({'obj': rgw_conn,
                                        'resource': 'Bucket',
                                        'args': [bucket_name_to_create]})
            # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
            created = s3lib.resource_op({'obj': bucket,
                                         'resource': 'create',
                                         'args': None,
                                         'extra_info': {'access_key': each_user['access_key']}})
            if created is False:
                raise TestExecError("Resource execution failed: bucket creation faield")
            if created is not None:
                response = HttpResponseParser(created)
                if response.status_code == 200:
                    log.info('bucket created')
                else:
                    raise TestExecError("bucket creation failed")
            else:
                raise TestExecError("bucket creation failed")
            # getting bucket version object
            if config.test_ops['enable_version'] is True:
                log.info('bucket versioning test on bucket: %s' % bucket.name)
                # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                bucket_versioning = s3lib.resource_op({'obj': rgw_conn,
                                                       'resource': 'BucketVersioning',
                                                       'args': [bucket.name]})
                # checking the versioning status
                # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                    'resource': 'status',
                                                    'args': None
                                                    })
                if version_status is None:
                    log.info('bucket versioning still not enabled')
                # enabling bucket versioning
                # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                version_enable_status = s3lib.resource_op({'obj': bucket_versioning,
                                                           'resource': 'enable',
                                                           'args': None,
                                                           })
                response = HttpResponseParser(version_enable_status)
                if response.status_code == 200:
                    log.info('version enabled')
                    write_bucket_io_info.add_versioning_status(each_user['access_key'],bucket.name,
                                                               VERSIONING_STATUS['ENABLED'])

                else:
                    raise TestExecError("version enable failed")
                if config.objects_count > 0:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, s3_object_size in list(config.mapped_sizes.items()):
                        # versioning upload
                        s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, str(oc))
                        s3_object_names.append(s3_object_name)
                        log.info('s3 object name: %s' % s3_object_name)
                        log.info('versioning count: %s' % config.version_count)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        original_data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                        if original_data_info is False:
                            TestExecError("data creation failed")
                        created_versions_count = 0
                        for vc in range(config.version_count):
                            log.info('version count for %s is %s' % (s3_object_name, str(vc)))
                            log.info('modifying data: %s' % s3_object_name)
                            modified_data_info = manage_data.io_generator(s3_object_path, s3_object_size,
                                                                          op='append',
                                                                          **{'message': '\nhello for version: %s\n'
                                                                                        % str(vc)})
                            if modified_data_info is False:
                                TestExecError("data modification failed")
                            log.info('uploading s3 object: %s' % s3_object_path)
                            upload_info = dict({'access_key': each_user['access_key'],
                                                'versioning_status': VERSIONING_STATUS['ENABLED'],
                                                'version_count_no': vc}, **modified_data_info)
                            s3_obj = s3lib.resource_op({'obj': bucket,
                                                        'resource': 'Object',
                                                        'args': [s3_object_name],
                                                        'extra_info': upload_info, })
                            object_uploaded_status = s3lib.resource_op({'obj': s3_obj,
                                                                        'resource': 'upload_file',
                                                                        'args': [modified_data_info['name']],
                                                                        'extra_info': upload_info})
                            if object_uploaded_status is False:
                                raise TestExecError("Resource execution failed: object upload failed")
                            if object_uploaded_status is None:
                                log.info('object uploaded')
                                s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                                log.info('current_version_id: %s' % s3_obj.version_id)
                                key_version_info = basic_io_structure.version_info(
                                    **{'version_id': s3_obj.version_id,
                                       'md5_local': upload_info['md5'],
                                       'count_no': vc,
                                       'size': upload_info['size']})
                                log.info('key_version_info: %s' % key_version_info)
                                write_key_io_info.add_versioning_info(each_user['access_key'], bucket.name,
                                                                      s3_object_path, key_version_info)
                                created_versions_count += 1
                                log.info('created_versions_count: %s' % created_versions_count)
                                log.info('adding metadata')
                                metadata1 = {"m_data1": "this is the meta1 for this obj"}
                                s3_obj.metadata.update(metadata1)
                                metadata2 = {"m_data2": "this is the meta2 for this obj"}
                                s3_obj.metadata.update(metadata2)
                                log.info('metadata for this object: %s' % s3_obj.metadata)
                                log.info('metadata count for object: %s' % (len(s3_obj.metadata)))
                                if not s3_obj.metadata:
                                    raise TestExecError('metadata not created even after adding metadata')
                                versions = bucket.object_versions.filter(Prefix=s3_object_name)
                                created_versions_count_from_s3 = len([v.version_id for v in versions])
                                log.info('created versions count on s3: %s' % created_versions_count_from_s3)
                                if created_versions_count == created_versions_count_from_s3:
                                    log.info('no new versions are created when metadata is added')
                                else:
                                    raise TestExecError("version count missmatch, "
                                                        "possible creation of version on adding metadata")
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download")
                            object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                          'resource': 'download_file',
                                                                          'args': [s3_object_name,
                                                                                   s3_object_download_path],
                                                                          })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            # checking md5 of the downloaded file
                            s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                            log.info('downloaded_md5: %s' % s3_object_downloaded_md5)
                            log.info('uploaded_md5: %s' % modified_data_info['md5'])
                            # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path)
                        log.info('all versions for the object: %s\n' % s3_object_name)
                        versions = bucket.object_versions.filter(Prefix=s3_object_name)
                        for version in versions:
                            log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                        if config.test_ops.get('set_acl', None) is True:
                            s3_obj_acl = s3lib.resource_op({'obj': rgw_conn,
                                                            'resource': 'ObjectAcl',
                                                            'args': [bucket.name, s3_object_name]})
                            # setting acl to private, just need to set to any acl and
                            # check if its set - check by response code
                            acls_set_status = s3_obj_acl.put(ACL='private')
                            response = HttpResponseParser(acls_set_status)
                            if response.status_code == 200:
                                log.info('ACLs set')
                            else:
                                raise TestExecError("Acls not Set")
                            # get obj details based on version id
                            for version in versions:
                                log.info('getting info for version id: %s' % version.version_id)
                                obj = s3lib.resource_op({'obj': rgw_conn,
                                                         'resource': 'Object',
                                                         'args': [bucket.name, s3_object_name]})
                                log.info('obj get details: %s\n' % (obj.get(VersionId=version.version_id)))
                        if config.test_ops['copy_to_version'] is True:
                            # reverting object to one of the versions ( randomly chosen )
                            version_id_to_copy = random.choice([v.version_id for v in versions])
                            log.info('version_id_to_copy: %s' % version_id_to_copy)
                            s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                            log.info('current version_id: %s' % s3_obj.version_id)
                            copy_response = s3_obj.copy_from(CopySource={'Bucket': bucket.name,
                                                                         'Key': s3_object_name,
                                                                         'VersionId': version_id_to_copy})
                            log.info('copy_response: %s' % copy_response)
                            if copy_response is None:
                                raise TestExecError("copy object from version id failed")
                            # current_version_id = copy_response['VersionID']
                            log.info('current_version_id: %s' % s3_obj.version_id)
                            # delete the version_id_to_copy object
                            s3_obj.delete(VersionId=version_id_to_copy)
                            log.info('all versions for the object after the copy operation: %s\n' % s3_object_name)
                            for version in versions:
                                log.info(
                                    'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                            # log.info('downloading current s3object: %s' % s3_object_name)
                            # s3_obj.download_file(s3_object_name + ".download")
                        if config.test_ops['delete_object_versions'] is True:
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, s3_object_name]})
                            log.info('deleting versions for s3 obj: %s' % s3_object_name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        write_key_io_info.delete_version_info(each_user['access_key'], bucket.name,
                                                                              s3_object_path, version.version_id)
                                    else:
                                        raise TestExecError("version  deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                            log.info('available versions for the object')
                            versions = bucket.object_versions.filter(Prefix=s3_object_name)
                            for version in versions:
                                log.info('key_name: %s --> version_id: %s' % (
                                    version.object_key, version.version_id))
                        if config.test_ops.get('delete_from_extra_user') is True:
                            log.info('trying to delete objects from extra user')
                            s3_obj = s3lib.resource_op({'obj': extra_user_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, s3_object_name]})
                            log.info('deleting versions for s3 obj: %s' % s3_object_name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(
                                                                         VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not False:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        write_key_io_info.delete_version_info(each_user['access_key'],
                                                                              bucket.name,
                                                                              s3_object_path,
                                                                              version.version_id)
                                        raise TestExecError("version and deleted, this should not happen")
                                    else:
                                        log.info('version did not delete, expected behaviour')
                                else:
                                    log.info('version did not delete, expected behaviour')
                        if config.local_file_delete is True:
                            log.info('deleting local file')
                            utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path)
                if config.test_ops['suspend_version'] is True:
                    log.info('suspending versioning')
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                'resource': 'suspend',
                                                                'args': None})
                    response = HttpResponseParser(suspend_version_status)
                    if response.status_code == 200:
                        log.info('versioning suspended')
                        write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                                   VERSIONING_STATUS['SUSPENDED'])
                    else:
                        raise TestExecError("version suspend failed")
                    # getting all objects in the bucket
                    log.info('getting all objects in the bucket')
                    objects = s3lib.resource_op({'obj': bucket,
                                                 'resource': 'objects',
                                                 'args': None})
                    log.info('objects :%s' % objects)
                    all_objects = s3lib.resource_op({'obj': objects,
                                                     'resource': 'all',
                                                     'args': None})
                    log.info('all objects: %s' % all_objects)
                    log.info('all objects2 :%s ' % bucket.objects.all())
                    for obj in all_objects:
                        log.info('object_name: %s' % obj.key)
                        versions = bucket.object_versions.filter(Prefix=obj.key)
                        log.info('displaying all versions of the object')
                        for version in versions:
                            log.info(
                                'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                if config.test_ops.get('suspend_from_extra_user') is True:
                    log.info('suspending versioning from extra user')
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')

                    bucket_versioning = s3lib.resource_op({'obj': extra_user_conn,
                                                           'resource': 'BucketVersioning',
                                                           'args': [bucket.name]})

                    suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                'resource': 'suspend',
                                                                'args': None})
                    if suspend_version_status is not False:
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info('versioning suspended')
                            write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                                       VERSIONING_STATUS['SUSPENDED'])
                            raise TestExecError('version suspended, this should not happen')
                    else:
                        log.info('versioning not suspended, expected behaviour')
            if config.test_ops.get('upload_after_suspend') is True:
                log.info('trying to upload after suspending versioning on bucket')
                for oc, s3_object_size in list(config.mapped_sizes.items()):
                    # non versioning upload
                    s3_object_name = s3_object_names[oc] + ".after_version_suspending"
                    log.info('s3 object name: %s' % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    non_version_data_info = manage_data.io_generator(s3_object_path, s3_object_size, op="append",
                                                                     **{
                                                                         'message': '\nhello for non version\n'})
                    if non_version_data_info is False:
                        TestExecError("data creation failed")
                    log.info('uploading s3 object: %s' % s3_object_path)
                    upload_info = dict({'access_key': each_user['access_key'],
                                        'versioning_status': 'suspended'}, **non_version_data_info)
                    s3_obj = s3lib.resource_op({'obj': bucket,
                                                'resource': 'Object',
                                                'args': [s3_object_name],
                                                'extra_info': upload_info})
                    object_uploaded_status = s3lib.resource_op({'obj': s3_obj,
                                                                'resource': 'upload_file',
                                                                'args': [non_version_data_info['name']],
                                                                'extra_info': upload_info})

                    if object_uploaded_status is False:
                        raise TestExecError("Resource execution failed: object upload failed")
                    if object_uploaded_status is None:
                        log.info('object uploaded')
                    s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                'resource': 'Object',
                                                'args': [bucket.name, s3_object_name]})
                    log.info('version_id: %s' % s3_obj.version_id)
                    if s3_obj.version_id is None:
                        log.info('Versions are not created after suspending')
                    else:
                        raise TestExecError('Versions are created even after suspending')
                    s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download")
                    object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                  'resource': 'download_file',
                                                                  'args': [s3_object_name,
                                                                           s3_object_download_path],
                                                                  })
                    if object_downloaded_status is False:
                        raise TestExecError("Resource execution failed: object download failed")
                    if object_downloaded_status is None:
                        log.info('object downloaded')
                    # checking md5 of the downloaded file
                    s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                    log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                    log.info('s3_object_uploaded_md5: %s' % non_version_data_info['md5'])
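                    # hedged addition: the two md5s are logged above but never
                    # compared; checking them makes the download verification real
                    if s3_object_downloaded_md5 != non_version_data_info['md5']:
                        raise TestExecError("md5 mismatch between uploaded and downloaded object")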
                    if config.local_file_delete is True:
                        utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path)
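
# A minimal sketch of the HttpResponseParser helper these examples rely on
# (its real definition lives in the shared test library and is not shown in
# this listing); it is assumed to wrap a boto3 response dict roughly like so:
class HttpResponseParser:
    def __init__(self, response):
        self.response = response
        # boto3 reports the HTTP status under ResponseMetadata
        self.status_code = response.get('ResponseMetadata', {}).get('HTTPStatusCode')
        # ClientError payloads carry an 'Error' dict with 'Code' and 'Message'
        self.error = response.get('Error', {})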
Example #23
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    test_info = AddTestInfo('create m buckets')
    conf_path = '/etc/ceph/%s.conf' % config.cluster_name
    ceph_conf = CephConfOp(conf_path)
    rgw_service = RGWService()
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            user_info = basic_io_structure.user(
                **{
                    'user_id': each_user['user_id'],
                    'access_key': each_user['access_key'],
                    'secret_key': each_user['secret_key']
                })
            write_user_info.add_user_info(user_info)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            # enabling sharding
            if config.test_ops['sharding']['enable'] is True:
                log.info('enabling sharding on buckets')
                max_shards = config.test_ops['sharding']['max_shards']
                log.info('making changes to ceph.conf')
                ceph_conf.set_to_ceph_conf(
                    'global', ConfigOpts.rgw_override_bucket_index_max_shards,
                    max_shards)
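                # assumption: the call above writes
                #   rgw_override_bucket_index_max_shards = <max_shards>
                # under [global] in ceph.conf, which is why the RGW service is
                # restarted next so the new shard count takes effect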
                log.info('trying to restart services ')
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')
            # create buckets
            if config.test_ops['create_bucket'] is True:
                log.info('no of buckets to create: %s' % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name_to_create = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    log.info('creating bucket with name: %s' %
                             bucket_name_to_create)
                    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                    bucket = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Bucket',
                        'args': [bucket_name_to_create]
                    })
                    created = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'create',
                        'args': None,
                        'extra_info': {
                            'access_key': each_user['access_key']
                        }
                    })
                    if created is False:
                        raise TestExecError(
                            "Resource execution failed: bucket creation failed"
                        )
                    if created is not None:
                        response = HttpResponseParser(created)
                        if response.status_code == 200:
                            log.info('bucket created')
                        else:
                            raise TestExecError("bucket creation failed")
                    else:
                        raise TestExecError("bucket creation failed")
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s --cluster %s | grep bucket_id' \
                              % (bucket.name, config.cluster_name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace(
                            '"',
                            '').strip().split(":")[1].strip().replace(',', '')
                        cmd2 = 'rados -p default.rgw.buckets.index ls --cluster %s | grep %s' \
                               % (config.cluster_name, b_id)
                        out = utils.exec_shell_cmd(cmd2)
                        log.info(
                            'got output from sharding verification: %s' % out)
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #24
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                bucket = reusable.create_bucket(bucket_name, rgw_conn,
                                                each_user)
                if config.test_ops["enable_versioning"] is True:
                    log.info("bucket versionig test on bucket: %s" %
                             bucket.name)
                    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                    bucket_versioning = s3lib.resource_op({
                        "obj":
                        rgw_conn,
                        "resource":
                        "BucketVersioning",
                        "args": [bucket.name],
                    })
                    version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "status",
                        "args": None
                    })
                    if version_status is None:
                        log.info("bucket versioning still not enabled")
                    # enabling bucket versioning
                    version_enable_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "enable",
                        "args": None
                    })
                    if version_enable_status is False:
                        raise TestExecError("version enable failed")
                    response = HttpResponseParser(version_enable_status)
                    if response.status_code == 200:
                        log.info("version enabled")
                    else:
                        raise TestExecError("version enable failed")
                if config.test_ops["create_object"] is True:
                    # upload data
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket.name, oc)
                        if config.test_ops["version_count"] > 0:
                            for vc in range(config.test_ops["version_count"]):
                                log.info("version count for %s is %s" %
                                         (s3_object_name, str(vc)))
                                log.info("modifying data: %s" % s3_object_name)
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                    append_data=True,
                                    append_msg="hello object for version: %s\n"
                                    % str(vc),
                                )
                        else:
                            log.info("s3 objects to create: %s" %
                                     config.objects_count)
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                bucket_life_cycle = s3lib.resource_op({
                    "obj": rgw_conn,
                    "resource": "BucketLifecycleConfiguration",
                    "args": [bucket.name],
                })
                life_cycle = basic_lifecycle_config(prefix="key",
                                                    days=20,
                                                    id="rul1")
                put_bucket_life_cycle = s3lib.resource_op({
                    "obj":
                    bucket_life_cycle,
                    "resource":
                    "put",
                    "kwargs":
                    dict(LifecycleConfiguration=life_cycle),
                })
                log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle)
                if put_bucket_life_cycle is False:
                    raise TestExecError(
                        "Resource execution failed: put bucket lifecycle failed")
                if put_bucket_life_cycle is not None:
                    response = HttpResponseParser(put_bucket_life_cycle)
                    if response.status_code == 200:
                        log.info("bucket life cycle added")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                else:
                    raise TestExecError("bucket lifecycle addition failed")
                log.info("trying to retrieve bucket lifecycle config")
                get_bucket_life_cycle_config = s3lib.resource_op({
                    "obj":
                    rgw_conn2,
                    "resource":
                    "get_bucket_lifecycle_configuration",
                    "kwargs":
                    dict(Bucket=bucket.name),
                })
                if get_bucket_life_cycle_config is False:
                    raise TestExecError(
                        "bucket lifecycle config retrieval failed")
                if get_bucket_life_cycle_config is not None:
                    response = HttpResponseParser(get_bucket_life_cycle_config)
                    if response.status_code == 200:
                        log.info("bucket life cycle retrieved")
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                else:
                    raise TestExecError("bucket life cycle retrieved")
                if config.test_ops["create_object"] is True:
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(
                            bucket.name, oc)
                        if config.test_ops["version_count"] > 0:
                            if (config.test_ops.get("delete_versioned_object",
                                                    None) is True):
                                log.info(
                                    "list all the versions of the object and delete the "
                                    "current version of the object")
                                log.info("all versions for the object: %s\n" %
                                         s3_object_name)
                                versions = bucket.object_versions.filter(
                                    Prefix=s3_object_name)
                                t1 = []
                                for version in versions:
                                    log.info(
                                        "key_name: %s --> version_id: %s" %
                                        (version.object_key,
                                         version.version_id))
                                    t1.append(version.version_id)
                                s3_object = s3lib.resource_op({
                                    "obj":
                                    rgw_conn,
                                    "resource":
                                    "Object",
                                    "args": [bucket.name, s3_object_name],
                                })
                                # log.info('object version to delete: %s -> %s' % (versions[0].object_key,
                                #                                                 versions[0].version_id))
                                delete_response = s3_object.delete()
                                log.info("delete response: %s" %
                                         delete_response)
                                if delete_response["DeleteMarker"] is True:
                                    log.info(
                                        "object delete marker is set to true")
                                else:
                                    raise TestExecError(
                                        "object delete marker is set to false"
                                    )
                                log.info(
                                    "available versions for the object after delete marker is set"
                                )
                                t2 = []
                                versions_after_delete_marker_is_set = (
                                    bucket.object_versions.filter(
                                        Prefix=s3_object_name))
                                for version in versions_after_delete_marker_is_set:
                                    log.info(
                                        "key_name: %s --> version_id: %s" %
                                        (version.object_key,
                                         version.version_id))
                                    t2.append(version.version_id)
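                                # t2 now also holds the delete marker's entry;
                                # drop it (assumed to be the trailing element
                                # here) before comparing with the pre-delete
                                # version list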
                                t2.pop()
                                if t1 == t2:
                                    log.info("versions remained intact")
                                else:
                                    raise TestExecError(
                                        "versions are not intact after delete marker is set"
                                    )
                # modify bucket lifecycle configuration, modify expiration days here for the test case.
                if config.test_ops.get("modify_lifecycle", False) is True:
                    log.info("modifying lifecycle configuration")
                    life_cycle_modified = basic_lifecycle_config(
                        prefix="key", days=15, id="rul1", status="Disabled")
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj":
                        bucket_life_cycle,
                        "resource":
                        "put",
                        "kwargs":
                        dict(LifecycleConfiguration=life_cycle_modified),
                    })
                    log.info("put bucket life cycle:\n%s" %
                             put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)

                        if response.status_code == 200:
                            log.info("bucket life cycle added")

                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info("trying to retrieve bucket lifecycle config")
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj":
                        rgw_conn2,
                        "resource":
                        "get_bucket_lifecycle_configuration",
                        "kwargs":
                        dict(Bucket=bucket.name),
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)
                        modified_expiration_days = get_bucket_life_cycle_config[
                            "Rules"][0]["Expiration"]["Days"]
                        log.info("modified expiration days: %s" %
                                 modified_expiration_days)
                        if (response.status_code == 200
                                and modified_expiration_days == 15):
                            log.info(
                                "bucket life cycle retrieved after modifying")
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after modifying"
                            )
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed after modifying"
                        )
                # disable bucket lifecycle configuration
                if config.test_ops.get("disable_lifecycle", False) is True:
                    log.info("disabling lifecycle configuration")
                    life_cycle_disabled_config = basic_lifecycle_config(
                        prefix="key", days=20, id="rul1", status="Disabled")
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj":
                        bucket_life_cycle,
                        "resource":
                        "put",
                        "kwargs":
                        dict(
                            LifecycleConfiguration=life_cycle_disabled_config),
                    })
                    log.info("put bucket life cycle:\n%s" %
                             put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info("bucket life cycle added")
                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info("trying to retrieve bucket lifecycle config")
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj":
                        rgw_conn2,
                        "resource":
                        "get_bucket_lifecycle_configuration",
                        "kwargs":
                        dict(Bucket=bucket.name),
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)
                        if (response.status_code == 200
                                and get_bucket_life_cycle_config["Rules"][0]
                            ["Status"] == "Disabled"):
                            log.info("disabled_status: %s" %
                                     get_bucket_life_cycle_config["Rules"][0]
                                     ["Status"])
                            log.info(
                                "bucket life cycle retrieved after disabled")
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after disabled"
                            )
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed after disabled"
                        )
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #25
def upload_mutipart_object(s3_object_name,
                           bucket,
                           TEST_DATA_PATH,
                           config,
                           user_info,
                           append_data=False,
                           append_msg=None):
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = config.obj_size
    split_size = config.split_size if hasattr(config, 'split_size') else 5
    log.info('split size: %s' % split_size)
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op='append',
            **{'message': '\n%s' % append_msg})
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    mp_dir = os.path.join(TEST_DATA_PATH, s3_object_name + '.mp.parts')
    log.info('mp part dir: %s' % mp_dir)
    log.info('making multipart object part dir')
    mkdir = utils.exec_shell_cmd('sudo mkdir %s' % mp_dir)
    if mkdir is False:
        raise TestExecError('mkdir failed creating mp_dir_name')
    utils.split_file(s3_object_path, split_size, mp_dir + "/")
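    # split_file is assumed to name the chunks with lexically sortable
    # suffixes so that sorted(glob.glob(...)) returns them in upload order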
    parts_list = sorted(glob.glob(mp_dir + '/' + '*'))
    log.info('parts_list: %s' % parts_list)
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict(
        {
            'access_key': user_info['access_key'],
            'upload_type': 'multipart'
        }, **data_info)
    s3_obj = s3lib.resource_op({
        'obj': bucket,
        'resource': 'Object',
        'args': [s3_object_name],
    })
    log.info('initiating multipart upload')
    mpu = s3lib.resource_op({
        'obj': s3_obj,
        'resource': 'initiate_multipart_upload',
        'args': None,
        'extra_info': upload_info
    })
    part_number = 1
    parts_info = {'Parts': []}
    log.info('no of parts: %s' % len(parts_list))
    for each_part in parts_list:
        log.info('trying to upload part: %s' % each_part)
        part = mpu.Part(part_number)
        # part_upload_response = part.upload(Body=open(each_part))
        part_upload_response = s3lib.resource_op({
            'obj':
            part,
            'resource':
            'upload',
            'kwargs':
            dict(Body=open(each_part, mode="rb"))
        })
        if part_upload_response is not False:
            response = HttpResponseParser(part_upload_response)
            if response.status_code == 200:
                log.info('part uploaded')
                if config.local_file_delete is True:
                    log.info('deleting local file part')
                    utils.exec_shell_cmd('sudo rm -rf %s' % each_part)
            else:
                raise TestExecError("part uploading failed")
        part_info = {
            'PartNumber': part_number,
            'ETag': part_upload_response['ETag']
        }
        parts_info['Parts'].append(part_info)
        if each_part != parts_list[-1]:
            # increase the part number only if the current part is not the last part
            part_number += 1
        log.info('curr part_number: %s' % part_number)
    # log.info('parts_info so far: %s'% parts_info)
    if len(parts_list) == part_number:
        log.info('all parts upload completed')
        mpu.complete(MultipartUpload=parts_info)
        log.info('multipart upload complete for key: %s' % s3_object_name)
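
# Hypothetical invocation of the helper above (bucket would come from a prior
# reusable.create_bucket() call; config carries obj_size/split_size/
# local_file_delete and user_info the access_key):
#
#   config.obj_size = 50
#   config.split_size = 5
#   upload_mutipart_object('key_mp_1', bucket, TEST_DATA_PATH, config, each_user)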
Example #26
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj":
        bucket_policy_obj,
        "resource":
        "put",
        "kwargs":
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: put bucket policy failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying buckey policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj":
            bucket_policy_obj,
            "resource":
            "put",
            "kwargs":
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0][
            "Action"]
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info("cleaned_actions_list_from_modified_policy: %s" %
                 cleaned_actions_list_from_modified_policy)
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj":
            bucket_policy_obj,
            "resource":
            "put",
            "kwargs":
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("new bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: delete bucket policy failed")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #27
def upload_mutipart_object(
    s3_object_name,
    bucket,
    TEST_DATA_PATH,
    config,
    user_info,
    append_data=False,
    append_msg=None,
):
    log.info("s3 object name: %s" % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info("s3 object path: %s" % s3_object_path)
    s3_object_size = config.obj_size
    split_size = config.split_size if hasattr(config, "split_size") else 5
    log.info("split size: %s" % split_size)
    if append_data is True:
        data_info = manage_data.io_generator(
            s3_object_path,
            s3_object_size,
            op="append",
            **{"message": "\n%s" % append_msg},
        )
    else:
        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    mp_dir = os.path.join(TEST_DATA_PATH, s3_object_name + ".mp.parts")
    log.info("mp part dir: %s" % mp_dir)
    log.info("making multipart object part dir")
    mkdir = utils.exec_shell_cmd("sudo mkdir %s" % mp_dir)
    if mkdir is False:
        raise TestExecError("mkdir failed creating mp_dir_name")
    utils.split_file(s3_object_path, split_size, mp_dir + "/")
    parts_list = sorted(glob.glob(mp_dir + "/" + "*"))
    log.info("parts_list: %s" % parts_list)
    log.info("uploading s3 object: %s" % s3_object_path)
    upload_info = dict(
        {
            "access_key": user_info["access_key"],
            "upload_type": "multipart"
        }, **data_info)
    s3_obj = s3lib.resource_op({
        "obj": bucket,
        "resource": "Object",
        "args": [s3_object_name],
    })
    log.info("initiating multipart upload")
    mpu = s3lib.resource_op({
        "obj": s3_obj,
        "resource": "initiate_multipart_upload",
        "args": None,
        "extra_info": upload_info,
    })
    part_number = 1
    parts_info = {"Parts": []}
    log.info("no of parts: %s" % len(parts_list))
    for each_part in parts_list:
        log.info("trying to upload part: %s" % each_part)
        part = mpu.Part(part_number)
        # part_upload_response = part.upload(Body=open(each_part))
        part_upload_response = s3lib.resource_op({
            "obj":
            part,
            "resource":
            "upload",
            "kwargs":
            dict(Body=open(each_part, mode="rb")),
        })
        if part_upload_response is not False:
            response = HttpResponseParser(part_upload_response)
            if response.status_code == 200:
                log.info("part uploaded")
                if config.local_file_delete is True:
                    log.info("deleting local file part")
                    utils.exec_shell_cmd("sudo rm -rf %s" % each_part)
            else:
                raise TestExecError("part uploading failed")
        part_info = {
            "PartNumber": part_number,
            "ETag": part_upload_response["ETag"]
        }
        parts_info["Parts"].append(part_info)
        if each_part != parts_list[-1]:
            # increase the part number only if the current part is not the last part
            part_number += 1
        log.info("curr part_number: %s" % part_number)
    # log.info('parts_info so far: %s'% parts_info)
    if len(parts_list) == part_number:
        log.info("all parts upload completed")
        mpu.complete(MultipartUpload=parts_info)
        log.info("multipart upload complete for key: %s" % s3_object_name)
def test_exec(config):
    test_info = AddTestInfo('create m buckets with n objects')
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth_using_client()
            rgw = auth.do_auth()
            bucket_list = []
            buckets = rgw_conn.list_buckets()
            log.info('buckets are %s' % buckets)
            for each_bucket in buckets['Buckets']:
                bucket_list.append(each_bucket['Name'])
            for bucket_name in bucket_list:
                # create 'bucket' resource object
                bucket = rgw.Bucket(bucket_name)
                log.info('In bucket: %s' % bucket_name)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                                             config.objects_size_range['max'])
                        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                        if data_info is False:
                            raise TestExecError("data creation failed")
                        log.info('uploading s3 object: %s' % s3_object_path)
                        upload_info = dict({'access_key': each_user['access_key']}, **data_info)
                        # object_uploaded_status = bucket.upload_file(s3_object_path, s3_object_name)
                        object_uploaded_status = s3lib.resource_op({'obj': bucket,
                                                                    'resource': 'upload_file',
                                                                    'args': [s3_object_path, s3_object_name],
                                                                    'extra_info': upload_info})
                        if object_uploaded_status is False:
                            raise TestExecError("Resource execution failed: object upload failed")
                        if object_uploaded_status is None:
                            log.info('object uploaded')
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' % s3_object_download_path)
                            log.info('downloading to filename: %s' % s3_object_download_name)
                            # object_downloaded_status = bucket.download_file(s3_object_path, s3_object_name)
                            object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                          'resource': 'download_file',
                                                                          'args': [s3_object_name,
                                                                                   s3_object_download_path],
                                                                          })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                    if config.test_ops['delete_bucket_object'] is True:
                        log.info('listing all objects in bucket: %s' % bucket.name)
                        # objects = s3_ops.resource_op(bucket, 'objects', None)
                        objects = s3lib.resource_op({'obj': bucket,
                                                     'resource': 'objects',
                                                     'args': None})
                        log.info('objects :%s' % objects)
                        # all_objects = s3_ops.resource_op(objects, 'all')
                        all_objects = s3lib.resource_op({'obj': objects,
                                                         'resource': 'all',
                                                         'args': None})
                        log.info('all objects: %s' % all_objects)
                        for obj in all_objects:
                            log.info('object_name: %s' % obj.key)
                        log.info('deleting all objects in bucket')
                        # objects_deleted = s3_ops.resource_op(objects, 'delete')
                        objects_deleted = s3lib.resource_op({'obj': objects,
                                                             'resource': 'delete',
                                                             'args': None})
                        log.info('objects_deleted: %s' % objects_deleted)
                        if objects_deleted is False:
                            raise TestExecError('Resource execution failed: Object deletion failed')
                        if objects_deleted is not None:
                            response = HttpResponseParser(objects_deleted[0])
                            if response.status_code == 200:
                                log.info('objects deleted ')
                            else:
                                raise TestExecError("objects deletion failed")
                        else:
                            raise TestExecError("objects deletion failed")
                        # wait for object delete info to sync
                        time.sleep(60)
                        log.info('deleting bucket: %s' % bucket.name)
                        # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                        bucket_deleted_status = s3lib.resource_op({'obj': bucket,
                                                                   'resource': 'delete',
                                                                   'args': None})
                        log.info('bucket_deleted_status: %s' % bucket_deleted_status)
                        if bucket_deleted_status is not None:
                            response = HttpResponseParser(bucket_deleted_status)
                            if response.status_code == 204:
                                log.info('bucket deleted ')
                            else:
                                raise TestExecError("bucket deletion failed")
                        else:
                            raise TestExecError("bucket deletion failed")
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #29
def test_exec(rgw_user_info_file, config):

    test_info = AddTestInfo('Test Basic IO on S3')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()

    try:

        test_info.started_info()

        with open(rgw_user_info_file, 'r') as f:
            rgw_user_info = yaml.safe_load(f)

        mount_point = rgw_user_info['nfs_mnt_point']

        nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file)

        mounted = nfs_ganesha.initialize(write_io_info=False)

        if mounted is False:
            raise TestExecError("mount failed")

        if nfs_ganesha.rgw_user_info[
                'nfs_version'] == 4 and nfs_ganesha.rgw_user_info[
                    'Pseudo'] is not None:
            log.info('nfs version: 4')
            log.info('adding Pseudo path to writable mount point')
            mount_point = os.path.join(mount_point,
                                       nfs_ganesha.rgw_user_info['Pseudo'])
            log.info('writable mount point with Pseudo: %s' % mount_point)
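            # illustrative values: a mount point of /mnt/nfs and a Pseudo path
            # of /rgw would make /mnt/nfs/rgw the writable root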

        log.info('authenticating rgw user')

        # authenticate

        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()

        # add user_info io_info yaml file

        user_info_add = basic_io_structure.user(**rgw_user_info)
        write_user_info.add_user_info(user_info_add)

        if config.io_op_config.get('create', None) is True:

            # create buckets

            for bc in range(config.bucket_count):

                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    rgw_user_info['user_id'], rand_no=bc)

                bucket = s3_reusables.create_bucket(bucket_name_to_create,
                                                    rgw_conn, rgw_user_info)

                # uploading data

                log.info('s3 objects to create: %s' % config.objects_count)

                for oc in range(config.objects_count):

                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)

                    s3_reusables.upload_object(s3_object_name, bucket,
                                               TEST_DATA_PATH, config,
                                               rgw_user_info)

            log.info('verification Starts on NFS mount after %s seconds' %
                     SLEEP_TIME)

            time.sleep(SLEEP_TIME)

            read_io_info_on_nfs = ReadIOInfoOnNFS(mount_point)
            read_io_info_on_nfs.yaml_fname = 'io_info.yaml'
            read_io_info_on_nfs.initialize_verify_io()
            read_io_info_on_nfs.verify_if_basedir_created()
            read_io_info_on_nfs.verify_if_files_created()
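            # the verifier is assumed to replay io_info.yaml (recorded during
            # the S3 IO above) and assert that each bucket appears as a base
            # directory and each object as a file under the NFS mount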

            log.info('verification complete, data intact')

            created_buckets = read_io_info_on_nfs.base_dirs
            created_objects = read_io_info_on_nfs.files

            if config.io_op_config.get('delete', None) is True:

                log.info('delete operation starts')

                for bucket_name in created_buckets:

                    bucket = s3lib.resource_op({
                        'obj':
                        rgw_conn,
                        'resource':
                        'Bucket',
                        'args': [os.path.basename(bucket_name)]
                    })  # buckets are base dirs in NFS

                    objects = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'objects',
                        'args': None
                    })

                    log.info('deleting all objects in bucket')

                    objects_deleted = s3lib.resource_op({
                        'obj': objects,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('objects_deleted: %s' % objects_deleted)

                    if objects_deleted is False:
                        raise TestExecError(
                            'Resource execution failed: Object deletion failed'
                        )

                    if objects_deleted is not None:

                        response = HttpResponseParser(objects_deleted[0])

                        if response.status_code == 200:
                            log.info('objects deleted ')

                        else:
                            raise TestExecError("objects deletion failed")

                    else:
                        raise TestExecError("objects deletion failed")

                    log.info('deleting bucket: %s' % bucket.name)

                    bucket_deleted_status = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('bucket_deleted_status: %s' %
                             bucket_deleted_status)

                    if bucket_deleted_status is not None:

                        response = HttpResponseParser(bucket_deleted_status)

                        if response.status_code == 204:
                            log.info('bucket deleted ')

                        else:
                            raise TestExecError("bucket deletion failed")

                    else:
                        raise TestExecError("bucket deletion failed")

                log.info(
                    'verification on NFS will start after 200 seconds for the delete operation')

                time.sleep(200)

                for basedir in created_buckets:

                    exists = os.path.exists(basedir)

                    log.info('exists status: %s' % exists)

                    if exists is True:
                        raise TestExecError(
                            "Basedir: %s not deleted on NFS" % basedir)

                log.info('basedirs deleted')

                for each_file in created_objects:

                    log.info('verifying existence for: %s' % each_file['file'])

                    exists = os.path.exists(each_file['file'])

                    if exists:
                        raise TestExecError("file: %s not deleted on NFS" %
                                            each_file['file'])

                    log.info('file deleted')

                log.info(
                    'verification of deletion complete, files removed from NFS'
                )

            if config.io_op_config.get('move', None) is True:

                log.info('move operation starts')

                for each_file in created_objects:

                    # in s3 move operation is achieved by copying the same object with the new name and
                    #  deleting the old object

                    log.info('move operation for :%s' % each_file['file'])

                    new_obj_name = os.path.basename(
                        each_file['file']) + ".moved"

                    log.info('new file name: %s' % new_obj_name)

                    new_object = s3lib.resource_op({
                        'obj':
                        rgw_conn,
                        'resource':
                        'Object',
                        'args': [each_file['bucket'], new_obj_name],
                    })

                    new_object.copy_from(
                        CopySource='%s/%s' %
                        (each_file['bucket'],
                         os.path.basename(
                             each_file['file'])))  # old object name

                    old_object = s3lib.resource_op({
                        'obj':
                        rgw_conn,
                        'resource':
                        'Object',
                        'args': [
                            each_file['bucket'],
                            os.path.basename(each_file['file'])
                        ],
                    })
                    old_object.delete()

                    each_file['file'] = os.path.abspath(
                        os.path.join(mount_point, each_file['bucket'],
                                     new_obj_name))

                log.info(
                    'verification on NFS for move operation will start after %s seconds'
                    % SLEEP_TIME)
                time.sleep(SLEEP_TIME)

                read_io_info_on_nfs.verify_if_files_created()

                log.info('move completed, data intact')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #30
def test_exec(config):
    test_info = AddTestInfo("create m buckets with n objects with bucket life cycle")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count, config.cluster_name)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            if config.test_ops["create_bucket"] is True:
                log.info("no of buckets to create: %s" % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name = utils.gen_bucket_name_from_userid(
                        each_user["user_id"], rand_no=bc
                    )
                    bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user)
                    if config.test_ops["create_object"] is True:
                        # uploading data
                        log.info("s3 objects to create: %s" % config.objects_count)
                        for oc in range(config.objects_count):
                            s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                            resuables.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                    bucket_life_cycle = s3lib.resource_op(
                        {
                            "obj": rgw_conn,
                            "resource": "BucketLifecycleConfiguration",
                            "args": [bucket.name],
                        }
                    )
                    life_cycle = basic_lifecycle_config(
                        prefix="key", days=20, id="rul1"
                    )
                    put_bucket_life_cycle = s3lib.resource_op(
                        {
                            "obj": bucket_life_cycle,
                            "resource": "put",
                            "kwargs": dict(LifecycleConfiguration=life_cycle),
                        }
                    )
                    log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info("bucket life cycle added")
                        else:
                            raise TestExecError("bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info("trying to retrieve bucket lifecycle config")
                    get_bucket_life_cycle_config = s3lib.resource_op(
                        {
                            "obj": rgw_conn2,
                            "resource": "get_bucket_lifecycle_configuration",
                            "kwargs": dict(Bucket=bucket.name),
                        }
                    )
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError("bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(get_bucket_life_cycle_config)
                        if response.status_code == 200:
                            log.info("bucket life cycle retrieved")
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed"
                            )
                    else:
                        raise TestExecError("bucket life cycle retrieved")
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)