Example #1
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    config.user_count = 1
    tenant1 = 'MountEverest'
    tenant2 = 'Himalayas'
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info['user_id'], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info['user_id'], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info['user_id']],
        actions_list=['CreateBucket'],
        resources=[t1_u1_bucket1.name])
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info('jsoned policy:%s\n' % bucket_policy)
    log.info('bucket_policy_generated:%s\n' % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        'obj': rgw_tenant1_user1,
        'resource': 'BucketPolicy',
        'args': [t1_u1_bucket1.name]
    })
    put_policy = s3lib.resource_op({
        'obj':
        bucket_policy_obj,
        'resource':
        'put',
        'kwargs':
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy)
    })
    log.info('put policy response:%s\n' % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200:
            log.info('bucket policy created')
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info('got bucket policy:%s\n' % get_policy['Policy'])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == 'modify':
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info('modifying bucket policy')
        actions_list = ['ListBucket', 'CreateBucket']
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info['user_id']],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name])
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            'obj':
            bucket_policy_obj,
            'resource':
            'put',
            'kwargs':
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2)
        })
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                log.info('bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy['Policy'])
        log.info('got bucket policy:%s\n' % modified_policy)
        actions_list_from_modified_policy = modified_policy['Statement'][0][
            'Action']
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info('cleaned_actions_list_from_modified_policy: %s' %
                 cleaned_actions_list_from_modified_policy)
        log.info('actions list to be modified: %s' % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info('cmp_val: %s' % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == 'replace':
        log.info('replacing new bucket policy')
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info['user_id']],
            actions_list=['ListBucket'],
            resources=[t1_u1_bucket2.name])
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            'obj':
            bucket_policy_obj,
            'resource':
            'put',
            'kwargs':
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy)
        })
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                log.info('new bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == 'delete':
        log.info('in delete bucket policy')
        delete_policy = s3lib.resource_op({
            'obj': bucket_policy_obj,
            'resource': 'delete',
            'args': None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200:
                log.info('bucket policy deleted')
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'NoSuchBucketPolicy':
                log.info('bucket policy deleted')
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #2
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj":
        bucket_policy_obj,
        "resource":
        "put",
        "kwargs":
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying buckey policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj":
            bucket_policy_obj,
            "resource":
            "put",
            "kwargs":
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0][
            "Action"]
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info("cleaned_actions_list_from_modified_policy: %s" %
                 cleaned_actions_list_from_modified_policy)
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj":
            bucket_policy_obj,
            "resource":
            "put",
            "kwargs":
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("new bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #3
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    if config.test_ops.get("upload_type") == "multipart":
        srv_time_pre_op = get_svc_time()

    # create user
    tenant1 = "tenant_" + random.choice(string.ascii_letters)
    tenant1_user_info = s3lib.create_tenant_users(tenant_name=tenant1,
                                                  no_of_users_to_create=2)
    tenant1_user1_info = tenant1_user_info[0]
    tenant1_user2_info = tenant1_user_info[1]

    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant1_user2_auth = Auth(tenant1_user2_info, ssl=config.ssl)

    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant1_user2 = tenant1_user2_auth.do_auth()
    rgw_tenant1_user2_c = tenant1_user2_auth.do_auth_using_client()

    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant1_user2_info["user_id"]],
        actions_list=["ListBucketMultiPartUploads"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj":
        bucket_policy_obj,
        "resource":
        "put",
        "kwargs":
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")

    if config.test_ops.get("upload_type") == "multipart":
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            for bucket in [t1_u1_bucket1, t1_u1_bucket2]:
                s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                log.info("s3 objects to create: %s" % config.objects_count)
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    tenant1_user1_info,
                )
        srv_time_post_op = get_svc_time()
        log.info(srv_time_pre_op)
        log.info(srv_time_post_op)

        if srv_time_post_op > srv_time_pre_op:
            log.info("Service is running without crash")
        else:
            raise TestExecError("Service got crashed")

    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])

    # List multipart uploads with tenant1_user2 user with bucket t1_u1_bucket1
    multipart_object1 = rgw_tenant1_user2_c.list_multipart_uploads(
        Bucket=t1_u1_bucket1.name)
    log.info("Multipart object %s" % multipart_object1)

    # Verify tenant1_user2 not having permission for listing multipart uploads in t1_u1_bucket2
    try:
        multipart_object2 = rgw_tenant1_user2_c.list_multipart_uploads(
            Bucket=t1_u1_bucket2.name)
        raise Exception(
            "%s user should not list multipart uploads in bucket: %s" %
            (tenant1_user2_info["user_id"], t1_u1_bucket2.name))
    except ClientError as err:
        log.info("Listing failed as expected with exception: %s" % err)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")