def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    # create buckets
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    bucket_versioning = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "BucketVersioning",
        "args": [bucket.name]
    })
    # checking the versioning status
    version_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "status",
        "args": None
    })
    if version_status is None:
        log.info("bucket versioning is not yet enabled")
    # enabling bucket versioning
    version_enable_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "enable",
        "args": None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info("version enabled")
    else:
        raise TestExecError("version enable failed")
    return bucket
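Every example here drives boto3 through s3lib.resource_op, which takes a dict naming the target object, the attribute to fetch or call, and optional args/kwargs, and returns False when the underlying call raises. The real helper is not shown in these snippets; a minimal sketch of such a dispatcher (hypothetical, for illustration only) could look like:

def resource_op(op_spec):
    # op_spec: {'obj': <boto3 object>, 'resource': <attribute name>,
    #           'args': [...] or None, 'kwargs': {...} (optional)}
    try:
        attr = getattr(op_spec['obj'], op_spec['resource'])
        args = op_spec.get('args') or []
        kwargs = op_spec.get('kwargs') or {}
        # plain attributes (e.g. 'status') are returned as-is,
        # callables (e.g. 'enable', 'put') are invoked
        return attr(*args, **kwargs) if callable(attr) else attr
    except Exception as e:
        print('resource_op failed: %s' % e)
        return False

Under that assumption the recurring three-way checks make sense: False means the call raised, None can mean either "attribute unset" (versioning status) or "call succeeded with no return value" (download_file), and anything else is parsed as an HTTP response.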
Example #2
def test_exec(config):
    test_info = AddTestInfo('Bucket Request Payer')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count,
                                            config.cluster_name)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            s3_object_names = []
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = resuables.create_bucket(
                    bucket_name=bucket_name_to_create,
                    rgw=rgw_conn,
                    user_info=each_user)
                bucket_request_payer = s3lib.resource_op({
                    'obj': rgw_conn,
                    'resource': 'BucketRequestPayment',
                    'args': [bucket.name]
                })
                # change the bucket request payer to 'requester'
                payer = {'Payer': 'Requester'}
                response = s3lib.resource_op({
                    'obj': bucket_request_payer,
                    'resource': 'put',
                    'kwargs': dict(RequestPaymentConfiguration=payer)
                })
                log.info(response)

        test_info.success_status('test passed')
        sys.exit(0)
    # TestExecError must be caught before the generic Exception,
    # otherwise this handler is unreachable
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
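For reference, the same request-payer change can be made without the resource_op wrapper; a minimal boto3 sketch (endpoint and credentials are placeholders, not taken from the example):

import boto3

s3 = boto3.resource('s3',
                    endpoint_url='http://rgw-host:8080',  # assumed endpoint
                    aws_access_key_id='ACCESS_KEY',       # placeholder
                    aws_secret_access_key='SECRET_KEY')   # placeholder
payment = s3.BucketRequestPayment('my-bucket')
payment.put(RequestPaymentConfiguration={'Payer': 'Requester'})
print(payment.payer)  # loads lazily; should read back 'Requester'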
def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    # create buckets
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    bucket_versioning = s3lib.resource_op({'obj': rgw_conn,
                                           'resource': 'BucketVersioning',
                                           'args': [bucket.name]})
    # checking the versioning status
    version_status = s3lib.resource_op({'obj': bucket_versioning,
                                        'resource': 'status',
                                        'args': None
                                        })
    if version_status is None:
        log.info('bucket versioning is not yet enabled')
    # enabling bucket versioning
    version_enable_status = s3lib.resource_op({'obj': bucket_versioning,
                                               'resource': 'enable',
                                               'args': None})
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info('version enabled')
    else:
        raise TestExecError("version enable failed")
    return bucket
def test_exec(config):
    test_info = AddTestInfo('test bucket policy')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # preparing data
        user_names = ['user1', 'user2', 'user3']
        Bucket_names = ['bucket1', 'bucket2', 'bucket3']
        object_names = ['o1', 'o2']
        tenant1 = 'tenant1'
        tenant2 = 'tenant2'
        t1_u1_info = create_tenant_user(tenant_name=tenant1,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t1_u1_auth = Auth(t1_u1_info)
        t1_u1 = t1_u1_auth.do_auth()
        t2_u1_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t2_u1_auth = Auth(t2_u1_info)
        t2_u1 = t2_u1_auth.do_auth()
        t1_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0],
                                           rgw=t1_u1,
                                           user_info=t1_u1_info)
        t2_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0],
                                           rgw=t2_u1,
                                           user_info=t2_u1_info)
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t1_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t1_u1_info)
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t2_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t2_u1_info)
        t2_u2_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[1],
                                        cluster_name=config.cluster_name)
        t2_u2_auth = Auth(t2_u2_info)
        t2_u2 = t2_u2_auth.do_auth()
        # will try to access the bucket and objects in both tenants
        # access t1_u1_b1
        log.info('trying to access tenant1->user1->bucket1')
        t1_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [Bucket_names[0]]
        })
        log.info(
            'trying to download tenant1->user1->bucket1->object1 from tenant2->user2'
        )
        download_path1 = TEST_DATA_PATH + "/t1_u1_b1_%s.download" % object_names[0]
        t1_u1_b1_o1_download = s3lib.resource_op({
            'obj': t1_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path1]
        })
        if t1_u1_b1_o1_download is False:
            log.info('object did not download, worked as expected\n')
        if t1_u1_b1_o1_download is None:
            raise TestExecError(
                "object downloaded for tenant1->user1->bucket1->object1, this should not happen"
            )
        log.info(
            'trying to access tenant2->user1->bucket1 from user2 in tenant 2')
        t2_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [Bucket_names[0]]
        })
        log.info(
            'trying to download tenant2->user1->bucket1->object1 from tenant2->user2'
        )
        download_path2 = TEST_DATA_PATH + "/t2_u1_b1_%s.download" % object_names[0]
        t2_u1_b1_o1_download = s3lib.resource_op({
            'obj': t2_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path2]
        })
        if t2_u1_b1_o1_download is False:
            log.info('object did not download, worked as expected')
        if t2_u1_b1_o1_download is None:
            raise TestExecError(
                'object downloaded\n'
                'downloaded tenant2->user1->bucket1->object1, this should not happen'
            )
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
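A note on the isolation being tested: a plain bucket name in an S3 request resolves inside the caller's own tenant, so reaching another tenant's bucket requires the explicit 'tenant:bucket' form plus a policy that grants access. A hedged sketch of the qualified lookup (t2_u2 as above; path and names are illustrative):

other_bucket = t2_u2.Bucket('tenant1:bucket1')  # tenant-qualified bucket name
try:
    other_bucket.download_file('o1', '/tmp/t1_b1_o1.download')
except Exception as e:
    print('expected AccessDenied without a cross-tenant grant: %s' % e)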
def test_exec(config):
    test_info = AddTestInfo("create m buckets with n objects with bucket life cycle")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count, config.cluster_name)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            if config.test_ops["create_bucket"] is True:
                log.info("no of buckets to create: %s" % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name = utils.gen_bucket_name_from_userid(
                        each_user["user_id"], rand_no=bc
                    )
                    bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user)
                    if config.test_ops["create_object"] is True:
                        # uploading data
                        log.info("s3 objects to create: %s" % config.objects_count)
                        for oc in range(config.objects_count):
                            s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                            resuables.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                    bucket_life_cycle = s3lib.resource_op(
                        {
                            "obj": rgw_conn,
                            "resource": "BucketLifecycleConfiguration",
                            "args": [bucket.name],
                        }
                    )
                    life_cycle = basic_lifecycle_config(
                        prefix="key", days=20, id="rul1"
                    )
                    put_bucket_life_cycle = s3lib.resource_op(
                        {
                            "obj": bucket_life_cycle,
                            "resource": "put",
                            "kwargs": dict(LifecycleConfiguration=life_cycle),
                        }
                    )
                    log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info("bucket life cycle added")
                        else:
                            raise TestExecError("bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info("trying to retrieve bucket lifecycle config")
                    get_bucket_life_cycle_config = s3lib.resource_op(
                        {
                            "obj": rgw_conn2,
                            "resource": "get_bucket_lifecycle_configuration",
                            "kwargs": dict(Bucket=bucket.name),
                        }
                    )
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError("bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(get_bucket_life_cycle_config)
                        if response.status_code == 200:
                            log.info("bucket life cycle retrieved")
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed"
                            )
                    else:
                        raise TestExecError("bucket life cycle retrieved")
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
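The basic_lifecycle_config helper is not shown in this example; boto3's BucketLifecycleConfiguration.put expects a dict of rules, so an assumed equivalent (shape only, not the helper's actual body) would be:

def basic_lifecycle_config(prefix, days, id):
    # LifecycleConfiguration shape accepted by boto3's
    # put / put_bucket_lifecycle_configuration calls
    return {
        "Rules": [
            {
                "ID": id,
                "Filter": {"Prefix": prefix},
                "Status": "Enabled",
                "Expiration": {"Days": days},
            }
        ]
    }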
Example #6
def test_exec(config):
    test_info = AddTestInfo('storage_policy for %s' % config.rgw_client)
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    try:
        # create pool
        pool_name = '.rgw.buckets.special'
        pg_num = '8'
        pgp_num = '8'
        pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (pool_name, pg_num, pgp_num)
        pool_create_exec = utils.exec_shell_cmd(pool_create)
        if pool_create_exec is False:
            raise TestExecError("Pool creation failed")
        # create realm
        realm_name = 'buz-tickets'
        log.info('creating realm: %s' % realm_name)
        realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s --default' % realm_name
        realm_create_exec = utils.exec_shell_cmd(realm_create)
        if realm_create_exec is False:
            raise TestExecError("cmd execution failed")
        # sample output of create realm
        """
        {
        "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
        "name": "buz-tickets",
        "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
        "epoch": 1
    }
        
        """
        log.info('modifying zonegroup')
        modify = 'sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master --default' % realm_name
        modify_exec = utils.exec_shell_cmd(modify)
        if modify_exec is False:
            raise TestExecError("cmd execution failed")
        # get the zonegroup
        zonegroup_file = 'zonegroup.json'
        get_zonegroup = 'sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s' % zonegroup_file
        get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
        if get_zonegroup_exec is False:
            raise TestExecError("cmd execution failed")
        add_to_placement_targets = {
            "name": "special-placement",
            "tags": []
        }
        with open(zonegroup_file, 'r') as fp:
            zonegroup_txt = fp.read()
        log.info('got zonegroup info: \n%s' % zonegroup_txt)
        zonegroup = json.loads(zonegroup_txt)
        log.info('adding placement targets')
        zonegroup['placement_targets'].append(add_to_placement_targets)
        with open(zonegroup_file, 'w') as fp:
            json.dump(zonegroup, fp)
        zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file
        zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
        if zonegroup_set_exec is False:
            raise TestExecError("cmd execution failed")
        log.info('zone group update completed')
        # get zone
        log.info('getting zone info')
        zone_file = 'zone.json'
        get_zone = 'sudo radosgw-admin zone --rgw-zone=default get > %s' % zone_file
        get_zone_exec = utils.exec_shell_cmd(get_zone)
        if get_zone_exec is False:
            raise TestExecError("cmd execution failed")
        with open(zone_file, 'r') as fp:
            zone_info = fp.read()
        log.info('zone_info :\n%s' % zone_info)
        zone_info_cleaned = json.loads(zone_info)
        special_placement_info = {
            "key": "special-placement",
            "val": {
                "index_pool": ".rgw.buckets.index",
                "data_pool": ".rgw.buckets.special",
                "data_extra_pool": ".rgw.buckets.extra"
            }
        }
        log.info('adding special placement info')
        zone_info_cleaned['placement_pools'].append(special_placement_info)
        log.info(zone_info_cleaned)
        with open(zone_file, 'w+') as fp:
            json.dump(zone_info_cleaned, fp)
        zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file
        zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
        if zone_file_set_exec is False:
            raise TestExecError("cmd execution failed")

        log.info('zone info updated ')
        restarted = rgw_service.restart()
        if restarted is False:
            raise TestExecError("service restart failed")
        if config.rgw_client == 'rgw':
            log.info('client type is rgw')
            rgw_user_info = s3_swift_lib.create_users(1)
            auth = Auth(rgw_user_info)
            rgw_conn = auth.do_auth()
            # create bucket
            bucket_name = utils.gen_bucket_name_from_userid(rgw_user_info['user_id'], 0)
            bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
            # create object
            s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
            resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, rgw_user_info)

        if config.rgw_client == 'swift':
            log.info('client type is swift')

            user_names = ['tuffy', 'scooby', 'max']
            tenant = 'tenant'

            umgmt = UserMgmt()
            umgmt.create_tenant_user(tenant_name=tenant, user_id=user_names[0],
                                     displayname=user_names[0])

            user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])

            auth = Auth(user_info)
            rgw = auth.do_auth()
            container_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=0)
            container = s3_swift_lib.resource_op({'obj': rgw,
                                                  'resource': 'put_container',
                                                  'args': [container_name]})
            if container is False:
                raise TestExecError("Resource execution failed: container creation failed")
            swift_object_name = utils.gen_s3_object_name('%s.container.%s' % (user_names[0], 0), 0)
            log.info('object name: %s' % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info('object path: %s' % object_path)
            object_size = utils.get_file_size(config.objects_size_range['min'],
                                              config.objects_size_range['max'])
            data_info = manage_data.io_generator(object_path, object_size)
            if data_info is False:
                raise TestExecError("data creation failed")
            # upload object
            log.info('uploading object: %s' % object_path)
            with open(object_path, 'r') as fp:
                rgw.put_object(container_name, swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
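Once the zonegroup and zone carry the new placement target and RGW has restarted, a client can opt into it per bucket: RGW reads the placement id from the S3 LocationConstraint. A hedged sketch (bucket name illustrative, rgw_conn as authenticated above):

bucket = rgw_conn.create_bucket(
    Bucket='special-bucket',
    CreateBucketConfiguration={
        # form is '<api-name>:<placement-id>'; an empty api name
        # selects the current zonegroup
        'LocationConstraint': ':special-placement'})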
Example #7
def test_exec(config):
    test_info = AddTestInfo("test bucket policy")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        config.user_count = 1
        tenant1 = "MountEverest"
        tenant2 = "Himalayas"
        tenant1_user_info = s3lib.create_tenant_users(
            tenant_name=tenant1,
            no_of_users_to_create=config.user_count,
            cluster_name=config.cluster_name,
        )
        tenant1_user1_info = tenant1_user_info[0]
        tenant2_user_info = s3lib.create_tenant_users(
            tenant_name=tenant2,
            no_of_users_to_create=config.user_count,
            cluster_name=config.cluster_name,
        )
        tenant2_user1_info = tenant2_user_info[0]
        tenant1_user1_auth = Auth(tenant1_user1_info)
        tenant2_user1_auth = Auth(tenant2_user1_info)
        rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
        rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
        rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
        rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
        # steps
        # create bucket in tenant1 for user1
        # generate bucket policy to user1 in tenant1, policy: list access to user1 in tenant2
        # add the policy to user1 in bucket1
        # # testing
        # modify bucket policy to replace the existing policy - TC 11215
        # add policy to the existing policy - TC 11214
        bucket_name1 = utils.gen_bucket_name_from_userid(
            tenant1_user1_info["user_id"], rand_no=1
        )
        t1_u1_bucket1 = resuables.create_bucket(
            bucket_name1,
            rgw_tenant1_user1,
            tenant1_user1_info,
        )
        bucket_name2 = utils.gen_bucket_name_from_userid(
            tenant1_user1_info["user_id"], rand_no=2
        )
        t1_u1_bucket2 = resuables.create_bucket(
            bucket_name2,
            rgw_tenant1_user1,
            tenant1_user1_info,
        )
        bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["CreateBucket"],
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy = json.dumps(bucket_policy_generated)
        log.info("jsoned policy:%s\n" % bucket_policy)
        log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
        bucket_policy_obj = s3lib.resource_op(
            {
                "obj": rgw_tenant1_user1,
                "resource": "BucketPolicy",
                "args": [t1_u1_bucket1.name],
            }
        )
        put_policy = s3lib.resource_op(
            {
                "obj": bucket_policy_obj,
                "resource": "put",
                "kwargs": dict(
                    ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy
                ),
            }
        )
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError("Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        # get policy
        get_policy = rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
        log.info("got bucket policy:%s\n" % get_policy["Policy"])
        # modifying bucket policy to take new policy
        if config.bucket_policy_op == "modify":
            # adding new action list: ListBucket to existing action: CreateBucket
            log.info("modifying buckey policy")
            actions_list = ["ListBucket", "CreateBucket"]
            actions = list(map(s3_bucket_policy.gen_action, actions_list))
            bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
                tenants_list=[tenant1],
                userids_list=[tenant2_user1_info["user_id"]],
                actions_list=actions_list,
                resources=[t1_u1_bucket1.name],
            )
            bucket_policy2 = json.dumps(bucket_policy2_generated)
            put_policy = s3lib.resource_op(
                {
                    "obj": bucket_policy_obj,
                    "resource": "put",
                    "kwargs": dict(
                        ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2
                    ),
                }
            )
            log.info("put policy response:%s\n" % put_policy)
            if put_policy is False:
                raise TestExecError("Resource execution failed: put bucket policy failed")
            if put_policy is not None:
                response = HttpResponseParser(put_policy)
                if response.status_code == 200:
                    log.info("bucket policy created")
                else:
                    raise TestExecError("bucket policy creation failed")
            else:
                raise TestExecError("bucket policy creation failed")
            get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
                Bucket=t1_u1_bucket1.name
            )
            modified_policy = json.loads(get_modified_policy["Policy"])
            log.info("got bucket policy:%s\n" % modified_policy)
            actions_list_from_modified_policy = modified_policy["Statement"][0][
                "Action"
            ]
            cleaned_actions_list_from_modified_policy = list(
                map(str, actions_list_from_modified_policy)
            )
            log.info(
                "cleaned_actions_list_from_modified_policy: %s"
                % cleaned_actions_list_from_modified_policy
            )
            log.info("actions list to be modified: %s" % actions)
            cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
            log.info("cmp_val: %s" % cmp_val)
            if cmp_val != 0:
                raise TestExecError("modification of bucket policy failed")
        if config.bucket_policy_op == "replace":
            log.info("replacing new bucket policy")
            new_policy_generated = s3_bucket_policy.gen_bucket_policy(
                tenants_list=[tenant1],
                userids_list=[tenant2_user1_info["user_id"]],
                actions_list=["ListBucket"],
                resources=[t1_u1_bucket2.name],
            )
            new_policy = json.dumps(new_policy_generated)
            put_policy = s3lib.resource_op(
                {
                    "obj": bucket_policy_obj,
                    "resource": "put",
                    "kwargs": dict(
                        ConfirmRemoveSelfBucketAccess=True, Policy=new_policy
                    ),
                }
            )
            log.info("put policy response:%s\n" % put_policy)
            if put_policy is False:
                raise TestExecError("Resource execution failed: put bucket policy failed")
            if put_policy is not None:
                response = HttpResponseParser(put_policy)
                if response.status_code == 200:
                    log.info("new bucket policy created")
                else:
                    raise TestExecError("bucket policy creation failed")
            else:
                raise TestExecError("bucket policy creation failed")
        if config.bucket_policy_op == "delete":
            log.info("in delete bucket policy")
            delete_policy = s3lib.resource_op(
                {"obj": bucket_policy_obj, "resource": "delete", "args": None}
            )
            if delete_policy is False:
                raise TestExecError("Resource execution failed: delete bucket policy failed")
            if delete_policy is not None:
                response = HttpResponseParser(delete_policy)
                if response.status_code == 200:
                    log.info("bucket policy deleted")
                else:
                    raise TestExecError("bucket policy deletion failed")
            else:
                raise TestExecError("bucket policy deletion failed")
            # confirming once again by calling get_bucket_policy
            try:
                rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
                raise TestExecError("bucket policy did not get deleted")
            except boto3exception.ClientError as e:
                log.info(e.response)
                response = HttpResponseParser(e.response)
                if response.error["Code"] == "NoSuchBucketPolicy":
                    log.info("bucket policy deleted")
                else:
                    raise TestExecError("bucket policy did not get deleted")
            # log.info('get_policy after deletion: %s' % get_policy)
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
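The body produced by s3_bucket_policy.gen_bucket_policy is not shown here; for RGW, tenanted principals are addressed with the arn:aws:iam::<tenant>:user/<user-id> form, so the generated document presumably resembles the following (shape assumed, placeholders kept as placeholders):

assumed_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        # RGW addresses tenanted users via the IAM ARN tenant field
        "Principal": {"AWS": ["arn:aws:iam::Himalayas:user/<tenant2-user-id>"]},
        "Action": ["s3:CreateBucket"],
        "Resource": ["arn:aws:s3:::<bucket-name>"],
    }]
}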
Example #8
def test_exec(config):
    test_info = AddTestInfo('test bucket policy')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        config.user_count = 1
        tenant1 = 'MountEverest'
        tenant2 = 'Himalayas'
        tenant1_user_info = s3lib.create_tenant_users(tenant_name=tenant1, no_of_users_to_create=config.user_count,
                                                      cluster_name=config.cluster_name)
        tenant1_user1_info = tenant1_user_info[0]
        tenant2_user_info = s3lib.create_tenant_users(tenant_name=tenant2, no_of_users_to_create=config.user_count,
                                                      cluster_name=config.cluster_name)
        tenant2_user1_info = tenant2_user_info[0]
        tenant1_user1_auth = Auth(tenant1_user1_info)
        tenant2_user1_auth = Auth(tenant2_user1_info)
        rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
        rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
        rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
        rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
        # steps
        # create bucket in tenant1 for user1
        # generate bucket policy to user1 in tenant1, policy: list access to user1 in tenant2
        # add the policy to user1 in bucket1
        # # testing
        # modify bucket policy to replace the existing policy - TC 11215
        # add policy to the existing policy - TC 11214
        bucket_name1 = utils.gen_bucket_name_from_userid(tenant1_user1_info['user_id'], rand_no=1)
        t1_u1_bucket1 = resuables.create_bucket(bucket_name1, rgw_tenant1_user1,
                                                tenant1_user1_info,
                                                )
        bucket_name2 = utils.gen_bucket_name_from_userid(tenant1_user1_info['user_id'], rand_no=2)
        t1_u1_bucket2 = resuables.create_bucket(bucket_name2, rgw_tenant1_user1,
                                                tenant1_user1_info,
                                                )
        bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(tenants_list=[tenant1],
                                                                     userids_list=[tenant2_user1_info['user_id']],
                                                                     actions_list=['CreateBucket'],
                                                                     resources=[t1_u1_bucket1.name]
                                                                     )
        bucket_policy = json.dumps(bucket_policy_generated)
        log.info('json-encoded policy:%s\n' % bucket_policy)
        log.info('bucket_policy_generated:%s\n' % bucket_policy_generated)
        bucket_policy_obj = s3lib.resource_op({'obj': rgw_tenant1_user1,
                                               'resource': 'BucketPolicy',
                                               'args': [t1_u1_bucket1.name]})
        put_policy = s3lib.resource_op({'obj': bucket_policy_obj,
                                        'resource': 'put',
                                        'kwargs': dict(ConfirmRemoveSelfBucketAccess=True,
                                                       Policy=bucket_policy)})
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError("Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                log.info('bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        # get policy
        get_policy = rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
        log.info('got bucket policy:%s\n' % get_policy['Policy'])
        # modifying bucket policy to take new policy
        if config.bucket_policy_op == 'modify':
            # adding new action list: ListBucket to existing action: CreateBucket
            log.info('modifying bucket policy')
            actions_list = ['ListBucket', 'CreateBucket']
            actions = list(map(s3_bucket_policy.gen_action, actions_list))
            bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(tenants_list=[tenant1],
                                                                          userids_list=[tenant2_user1_info['user_id']],
                                                                          actions_list=actions_list,
                                                                          resources=[t1_u1_bucket1.name]
                                                                          )
            bucket_policy2 = json.dumps(bucket_policy2_generated)
            put_policy = s3lib.resource_op({'obj': bucket_policy_obj,
                                            'resource': 'put',
                                            'kwargs': dict(ConfirmRemoveSelfBucketAccess=True,
                                                           Policy=bucket_policy2)})
            log.info('put policy response:%s\n' % put_policy)
            if put_policy is False:
                raise TestExecError("Resource execution failed: put bucket policy failed")
            if put_policy is not None:
                response = HttpResponseParser(put_policy)
                if response.status_code == 200:
                    log.info('bucket policy created')
                else:
                    raise TestExecError("bucket policy creation failed")
            else:
                raise TestExecError("bucket policy creation failed")
            get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            modified_policy = json.loads(get_modified_policy['Policy'])
            log.info('got bucket policy:%s\n' % modified_policy)
            actions_list_from_modified_policy = modified_policy['Statement'][0]['Action']
            cleaned_actions_list_from_modified_policy = list(map(str, actions_list_from_modified_policy))
            log.info('cleaned_actions_list_from_modified_policy: %s' % cleaned_actions_list_from_modified_policy)
            log.info('actions list to be modified: %s' % actions)
            cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
            log.info('cmp_val: %s' % cmp_val)
            if cmp_val != 0:
                raise TestExecError("modification of bucket policy failed")
        if config.bucket_policy_op == 'replace':
            log.info('replacing new bucket policy')
            new_policy_generated = s3_bucket_policy.gen_bucket_policy(tenants_list=[tenant1],
                                                                      userids_list=[tenant2_user1_info['user_id']],
                                                                      actions_list=['ListBucket'],
                                                                      resources=[t1_u1_bucket2.name]
                                                                      )
            new_policy = json.dumps(new_policy_generated)
            put_policy = s3lib.resource_op({'obj': bucket_policy_obj,
                                            'resource': 'put',
                                            'kwargs': dict(ConfirmRemoveSelfBucketAccess=True,
                                                           Policy=new_policy)})
            log.info('put policy response:%s\n' % put_policy)
            if put_policy is False:
                raise TestExecError("Resource execution failed: put bucket policy failed")
            if put_policy is not None:
                response = HttpResponseParser(put_policy)
                if response.status_code == 200:
                    log.info('new bucket policy created')
                else:
                    raise TestExecError("bucket policy creation failed")
            else:
                raise TestExecError("bucket policy creation failed")
        if config.bucket_policy_op == 'delete':
            log.info('in delete bucket policy')
            delete_policy = s3lib.resource_op({'obj': bucket_policy_obj,
                                               'resource': 'delete',
                                               'args': None}
                                              )
            if delete_policy is False:
                raise TestExecError("Resource execution failed: delete bucket policy failed")
            if delete_policy is not None:
                response = HttpResponseParser(delete_policy)
                if response.status_code == 200:
                    log.info('bucket policy deleted')
                else:
                    raise TestExecError("bucket policy deletion failed")
            else:
                raise TestExecError("bucket policy deletion failed")
            # confirming once again by calling get_bucket_policy
            try:
                rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
                raise TestExecError("bucket policy did not get deleted")
            except boto3exception.ClientError as e:
                log.info(e.response)
                response = HttpResponseParser(e.response)
                if response.error['Code'] == 'NoSuchBucketPolicy':
                    log.info('bucket policy deleted')
                else:
                    raise TestExecError("bucket policy did not get deleted")
            # log.info('get_policy after deletion: %s' % get_policy)
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo('Bucket Request Payer')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            s3_object_names = []
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = resuables.create_bucket(bucket_name=bucket_name_to_create, rgw=rgw_conn, user_info=each_user)
                bucket_request_payer = s3lib.resource_op({'obj': rgw_conn,
                                                          'resource': 'BucketRequestPayment',
                                                          'args': [bucket.name]
                                                          })
                # change the bucket request payer to 'requester'
                payer = {'Payer': 'Requester'}
                response = s3lib.resource_op({'obj': bucket_request_payer,
                                              'resource': 'put',
                                              'kwargs': dict(RequestPaymentConfiguration=payer)})
                log.info(response)
                if response is not None:
                    response = HttpResponseParser(response)
                    if response.status_code == 200:
                        log.info('bucket request payer set to Requester')
                    else:
                        raise TestExecError("bucket request payer modification failed")
                else:
                    raise TestExecError("bucket request payer modification failed")
                payer = bucket_request_payer.payer
                log.info('bucket request payer: %s' % payer)
                if payer != 'Requester':
                    raise TestExecError('Request payer is not set or changed properly')
                log.info('s3 objects to create: %s' % config.objects_count)
                if config.objects_count is not None:
                    log.info('objects size range:\n%s' % config.objects_size_range)
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
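A final cross-check of the payer setting is also possible through the low-level client, assuming do_auth_using_client() returns a boto3 client as in the lifecycle example above (hypothetical reuse, not part of this test):

rgw_client = auth.do_auth_using_client()  # assumed to return a boto3 s3 client
resp = rgw_client.get_bucket_request_payment(Bucket=bucket.name)
assert resp['Payer'] == 'Requester', 'request payer was not applied'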