def test_exec(config):
    """Create RGW users (tenanted or non-tenanted per config.user_type).

    Persists the created users' details to lib_dir/user_details.json and
    exits the process: 0 on success, 1 on any failure.
    """
    test_info = AddTestInfo('create users')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    user_detail_file = os.path.join(lib_dir, 'user_details.json')
    try:
        test_info.started_info()
        if config.user_type == 'non-tenanted':
            # create a non-tenanted user
            all_users_info = s3lib.create_users(config.user_count)
            with open(user_detail_file, 'w') as fout:
                json.dump(all_users_info, fout)
            test_info.success_status('non-tenanted users creation completed')
        else:
            log.info('create tenanted users')
            # BUGFIX: the original rewrote user_details.json on every loop
            # iteration, so only the last tenant's users were persisted.
            # Accumulate the users across all tenants and write once.
            all_users_info = []
            for i in range(config.user_count):
                tenant_name = 'tenant' + str(i)
                all_users_info.extend(
                    s3lib.create_tenant_users(config.user_count, tenant_name))
            with open(user_detail_file, 'w') as fout:
                json.dump(all_users_info, fout)
            test_info.success_status('tenanted users creation completed')
        test_info.success_status('test passed')
        sys.exit(0)
    except (RGWBaseException, Exception) as e:
        # Single handler: the original had a second
        # `except (RGWBaseException, Exception)` clause after a bare
        # `except Exception`, which was unreachable dead code.
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('user creation failed')
        sys.exit(1)
def _verify_policy_op(op_response, op_name, success_msg, failure_msg):
    """Validate a bucket-policy operation response from s3lib.resource_op.

    s3lib.resource_op returns False when resource execution itself failed,
    None when no response came back, or a raw boto3 response dict otherwise.
    An HTTP status of 200 or 204 counts as success; anything else raises
    TestExecError(failure_msg).
    """
    if op_response is False:
        # Fixed message: the original said "bucket creation faield" (typo,
        # and the failing operation is a policy op, not bucket creation).
        raise TestExecError("Resource execution failed: %s failed" % op_name)
    if op_response is None:
        raise TestExecError(failure_msg)
    response = HttpResponseParser(op_response)
    if response.status_code == 200 or response.status_code == 204:
        log.info(success_msg)
    else:
        raise TestExecError(failure_msg)


def test_exec(config):
    """Exercise S3 bucket-policy put/get plus modify/replace/delete.

    Creates one user under each of two tenants and two buckets for the
    tenant1 user, attaches a policy granting the tenant2 user CreateBucket
    on the first bucket, then performs the operation selected by
    config.bucket_policy_op ("modify" | "replace" | "delete") and verifies
    the result.  Ends with multisite sync and ceph-crash checks.
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create one user under each tenant
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    # policy: allow tenant2's user CreateBucket on tenant1's first bucket
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj": bucket_policy_obj,
        "resource": "put",
        "kwargs": dict(ConfirmRemoveSelfBucketAccess=True,
                       Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    _verify_policy_op(put_policy, "put bucket policy",
                      "bucket policy created",
                      "bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying bucket policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True,
                           Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        _verify_policy_op(put_policy, "put bucket policy",
                          "bucket policy created",
                          "bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0][
            "Action"]
        # stringify so the comparison is against plain str actions
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info("cleaned_actions_list_from_modified_policy: %s" %
                 cleaned_actions_list_from_modified_policy)
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions,
                            cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True,
                           Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        _verify_policy_op(put_policy, "put bucket policy",
                          "new bucket policy created",
                          "bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None
        })
        _verify_policy_op(delete_policy, "delete bucket policy",
                          "bucket policy deleted",
                          "bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy; it must now
        # fail with NoSuchBucketPolicy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            # TestExecError is not a ClientError, so this propagates
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)
    # check sync status if a multisite cluster
    reusable.check_sync_status()
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    """Test RGW user rename, bucket rename, and bucket link/unlink.

    Creates config.user_count non-tenanted and tenanted users, optionally
    renames them (config.test_ops['rename_users']), creates one bucket per
    user and optionally renames it (config.test_ops['rename_buckets']),
    then optionally moves a bucket between a non-tenanted and a tenanted
    owner (config.test_ops['bucket_link_unlink']).
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    non_ten_buckets = {}
    ten_buckets = {}
    # NOTE: removed unused local `user_names` present in the original.
    tenant1 = 'tenant'
    non_ten_users = s3lib.create_users(config.user_count)
    ten_users = s3lib.create_tenant_users(config.user_count, tenant1)
    # Rename users
    if config.test_ops['rename_users'] is True:
        for user in non_ten_users:
            new_non_ten_name = 'new' + user['user_id']
            out = reusable.rename_user(user['user_id'], new_non_ten_name)
            if out is False:
                raise TestExecError("RGW User rename error")
            log.info('output :%s' % out)
            # keep the in-memory record in sync with the renamed user
            user['user_id'] = new_non_ten_name
        for ten_user in ten_users:
            new_ten_name = 'new' + ten_user['user_id']
            out1 = reusable.rename_user(ten_user['user_id'], new_ten_name,
                                        tenant1)
            if out1 is False:
                raise TestExecError("RGW User rename error")
            log.info('output :%s' % out1)
            ten_user['user_id'] = new_ten_name
    # create buckets and test rename
    for user in non_ten_users:
        auth = Auth(user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create1 = utils.gen_bucket_name_from_userid(
            user['user_id'])
        log.info('creating bucket with name: %s' % bucket_name_to_create1)
        bucket = reusable.create_bucket(bucket_name_to_create1, rgw_conn,
                                        user)
        non_ten_buckets[user['user_id']] = bucket_name_to_create1
        if config.test_ops['rename_buckets'] is True:
            bucket_new_name1 = 'new' + bucket_name_to_create1
            non_ten_buckets[user['user_id']] = bucket_new_name1
            out2 = reusable.rename_bucket(bucket.name, bucket_new_name1,
                                          user['user_id'])
            if out2 is False:
                raise TestExecError("RGW Bucket rename error")
            log.info('output :%s' % out2)
    for ten_user in ten_users:
        auth = Auth(ten_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create2 = utils.gen_bucket_name_from_userid(
            ten_user['user_id'])
        log.info('creating bucket with name: %s' % bucket_name_to_create2)
        bucket = reusable.create_bucket(bucket_name_to_create2, rgw_conn,
                                        ten_user)
        ten_buckets[ten_user['user_id']] = bucket_name_to_create2
        if config.test_ops['rename_buckets'] is True:
            bucket_new_name2 = 'new' + bucket_name_to_create2
            ten_buckets[ten_user['user_id']] = bucket_new_name2
            out3 = reusable.rename_bucket(bucket.name, bucket_new_name2,
                                          ten_user['user_id'], tenant1)
            if out3 is False:
                raise TestExecError("RGW Bucket rename error")
            log.info('output :%s' % out3)
    if config.test_ops['bucket_link_unlink'] is True:
        # Bucket unlink and link from non tenanted to tenanted users
        out4 = reusable.unlink_bucket(
            non_ten_users[0]['user_id'],
            non_ten_buckets[non_ten_users[0]['user_id']])
        if out4 is False:
            raise TestExecError("RGW Bucket unlink error")
        log.info('output :%s' % out4)
        reusable.link_chown_to_tenanted(
            ten_users[0]['user_id'],
            non_ten_buckets[non_ten_users[0]['user_id']], tenant1)
        # Bucket unlink and link from tenanted to non tenanted users
        out5 = reusable.unlink_bucket(ten_users[0]['user_id'],
                                      ten_buckets[ten_users[0]['user_id']],
                                      tenant1)
        if out5 is False:
            raise TestExecError("RGW Bucket unlink error")
        log.info('output :%s' % out5)
        reusable.link_chown_to_nontenanted(
            non_ten_users[0]['user_id'],
            ten_buckets[ten_users[0]['user_id']], tenant1)
    # check for any crashes during the execution (consistency with the
    # sibling variant of this test, which performs the same final check)
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    """Test RGW user rename, bucket rename, and bucket link/unlink.

    Creates config.user_count non-tenanted and tenanted users, optionally
    renames them (config.test_ops["rename_users"]), creates one bucket per
    user and optionally renames it (config.test_ops["rename_buckets"]),
    then optionally moves a bucket between a non-tenanted and a tenanted
    owner (config.test_ops["bucket_link_unlink"]).  Ends with a ceph-crash
    check.
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    non_ten_buckets = {}
    ten_buckets = {}
    # NOTE: removed unused local `user_names` present in the original.
    tenant1 = "tenant"
    non_ten_users = s3lib.create_users(config.user_count)
    ten_users = s3lib.create_tenant_users(config.user_count, tenant1)
    # Rename users
    if config.test_ops["rename_users"] is True:
        for user in non_ten_users:
            new_non_ten_name = "new" + user["user_id"]
            out = reusable.rename_user(user["user_id"], new_non_ten_name)
            if out is False:
                raise TestExecError("RGW User rename error")
            log.info("output :%s" % out)
            # keep the in-memory record in sync with the renamed user
            user["user_id"] = new_non_ten_name
        for ten_user in ten_users:
            new_ten_name = "new" + ten_user["user_id"]
            out1 = reusable.rename_user(ten_user["user_id"], new_ten_name,
                                        tenant1)
            if out1 is False:
                raise TestExecError("RGW User rename error")
            log.info("output :%s" % out1)
            ten_user["user_id"] = new_ten_name
    # create buckets and test rename
    for user in non_ten_users:
        auth = Auth(user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create1 = utils.gen_bucket_name_from_userid(
            user["user_id"])
        log.info("creating bucket with name: %s" % bucket_name_to_create1)
        bucket = reusable.create_bucket(bucket_name_to_create1, rgw_conn,
                                        user)
        non_ten_buckets[user["user_id"]] = bucket_name_to_create1
        if config.test_ops["rename_buckets"] is True:
            bucket_new_name1 = "new" + bucket_name_to_create1
            non_ten_buckets[user["user_id"]] = bucket_new_name1
            out2 = reusable.rename_bucket(bucket.name, bucket_new_name1,
                                          user["user_id"])
            if out2 is False:
                raise TestExecError("RGW Bucket rename error")
            log.info("output :%s" % out2)
    for ten_user in ten_users:
        auth = Auth(ten_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create2 = utils.gen_bucket_name_from_userid(
            ten_user["user_id"])
        log.info("creating bucket with name: %s" % bucket_name_to_create2)
        bucket = reusable.create_bucket(bucket_name_to_create2, rgw_conn,
                                        ten_user)
        ten_buckets[ten_user["user_id"]] = bucket_name_to_create2
        if config.test_ops["rename_buckets"] is True:
            bucket_new_name2 = "new" + bucket_name_to_create2
            ten_buckets[ten_user["user_id"]] = bucket_new_name2
            out3 = reusable.rename_bucket(bucket.name, bucket_new_name2,
                                          ten_user["user_id"], tenant1)
            if out3 is False:
                raise TestExecError("RGW Bucket rename error")
            log.info("output :%s" % out3)
    if config.test_ops["bucket_link_unlink"] is True:
        # Bucket unlink and link from non tenanted to tenanted users
        out4 = reusable.unlink_bucket(
            non_ten_users[0]["user_id"],
            non_ten_buckets[non_ten_users[0]["user_id"]])
        if out4 is False:
            raise TestExecError("RGW Bucket unlink error")
        log.info("output :%s" % out4)
        reusable.link_chown_to_tenanted(
            ten_users[0]["user_id"],
            non_ten_buckets[non_ten_users[0]["user_id"]],
            tenant1,
        )
        # Bucket unlink and link from tenanted to non tenanted users
        out5 = reusable.unlink_bucket(ten_users[0]["user_id"],
                                      ten_buckets[ten_users[0]["user_id"]],
                                      tenant1)
        if out5 is False:
            raise TestExecError("RGW Bucket unlink error")
        log.info("output :%s" % out5)
        reusable.link_chown_to_nontenanted(
            non_ten_users[0]["user_id"],
            ten_buckets[ten_users[0]["user_id"]], tenant1)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    """Exercise S3 bucket-policy put/get plus modify/replace/delete.

    Creates one user under each of two tenants and two buckets for the
    tenant1 user, attaches a policy granting the tenant2 user CreateBucket
    on the first bucket, then performs the operation selected by
    config.bucket_policy_op ('modify' | 'replace' | 'delete') and verifies
    the result.  Ends with a ceph-crash check.
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create one user under each tenant
    config.user_count = 1
    tenant1 = 'MountEverest'
    tenant2 = 'Himalayas'
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info['user_id'], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info['user_id'], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    # policy: allow tenant2's user CreateBucket on tenant1's first bucket
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info['user_id']],
        actions_list=['CreateBucket'],
        resources=[t1_u1_bucket1.name])
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info('jsoned policy:%s\n' % bucket_policy)
    log.info('bucket_policy_generated:%s\n' % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        'obj': rgw_tenant1_user1,
        'resource': 'BucketPolicy',
        'args': [t1_u1_bucket1.name]
    })
    put_policy = s3lib.resource_op({
        'obj': bucket_policy_obj,
        'resource': 'put',
        'kwargs': dict(ConfirmRemoveSelfBucketAccess=True,
                       Policy=bucket_policy)
    })
    log.info('put policy response:%s\n' % put_policy)
    if put_policy is False:
        # fixed message: was "bucket creation faield" (typo, wrong op)
        raise TestExecError(
            "Resource execution failed: put bucket policy failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        # accept 204 as well as 200, consistent with the sibling variant
        # of this test (RGW may answer either for a successful policy op)
        if response.status_code == 200 or response.status_code == 204:
            log.info('bucket policy created')
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info('got bucket policy:%s\n' % get_policy['Policy'])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == 'modify':
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info('modifying bucket policy')
        actions_list = ['ListBucket', 'CreateBucket']
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info['user_id']],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name])
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            'obj': bucket_policy_obj,
            'resource': 'put',
            'kwargs': dict(ConfirmRemoveSelfBucketAccess=True,
                           Policy=bucket_policy2)
        })
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info('bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy['Policy'])
        log.info('got bucket policy:%s\n' % modified_policy)
        actions_list_from_modified_policy = modified_policy['Statement'][0][
            'Action']
        # stringify so the comparison is against plain str actions
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info('cleaned_actions_list_from_modified_policy: %s' %
                 cleaned_actions_list_from_modified_policy)
        log.info('actions list to be modified: %s' % actions)
        cmp_val = utils.cmp(actions,
                            cleaned_actions_list_from_modified_policy)
        log.info('cmp_val: %s' % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == 'replace':
        log.info('replacing new bucket policy')
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info['user_id']],
            actions_list=['ListBucket'],
            resources=[t1_u1_bucket2.name])
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            'obj': bucket_policy_obj,
            'resource': 'put',
            'kwargs': dict(ConfirmRemoveSelfBucketAccess=True,
                           Policy=new_policy)
        })
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info('new bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == 'delete':
        log.info('in delete bucket policy')
        delete_policy = s3lib.resource_op({
            'obj': bucket_policy_obj,
            'resource': 'delete',
            'args': None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: delete bucket policy failed")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info('bucket policy deleted')
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy; it must now
        # fail with NoSuchBucketPolicy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            # TestExecError is not a ClientError, so this propagates
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'NoSuchBucketPolicy':
                log.info('bucket policy deleted')
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    """Verify ListBucketMultiPartUploads bucket-policy permissions.

    Creates two users under one random tenant and two buckets owned by
    user1.  A bucket policy grants user2 ListBucketMultiPartUploads on
    bucket1 only.  With upload_type == "multipart", multipart objects are
    uploaded to both buckets and the RGW service time is sampled before and
    after to assert the service did not restart.  Finally, user2 must be
    able to list multipart uploads on bucket1 and must be denied on
    bucket2.
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    if config.test_ops.get("upload_type") == "multipart":
        # sample service uptime before any work; compared after uploads
        srv_time_pre_op = get_svc_time()
    # create user
    tenant1 = "tenant_" + random.choice(string.ascii_letters)
    tenant1_user_info = s3lib.create_tenant_users(tenant_name=tenant1,
                                                  no_of_users_to_create=2)
    tenant1_user1_info = tenant1_user_info[0]
    tenant1_user2_info = tenant1_user_info[1]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant1_user2_auth = Auth(tenant1_user2_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    # NOTE(review): rgw_tenant1_user2 is never used below; only the client
    # connection for user2 is exercised — candidate for removal.
    rgw_tenant1_user2 = tenant1_user2_auth.do_auth()
    rgw_tenant1_user2_c = tenant1_user2_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    # policy: user2 may list multipart uploads on bucket1 only
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant1_user2_info["user_id"]],
        actions_list=["ListBucketMultiPartUploads"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj": bucket_policy_obj,
        "resource": "put",
        "kwargs": dict(ConfirmRemoveSelfBucketAccess=True,
                       Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    # resource_op returns False on execution failure, None on no response,
    # otherwise the raw boto3 response dict
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        # RGW may answer 200 or 204 for a successful put-policy
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    if config.test_ops.get("upload_type") == "multipart":
        # upload one multipart object per mapped size into each bucket
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            for bucket in [t1_u1_bucket1, t1_u1_bucket2]:
                s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                log.info("s3 objects to create: %s" % config.objects_count)
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    tenant1_user1_info,
                )
        # post-op service time must exceed the pre-op sample; a smaller
        # value implies the service restarted (crashed) during uploads
        srv_time_post_op = get_svc_time()
        log.info(srv_time_pre_op)
        log.info(srv_time_post_op)
        if srv_time_post_op > srv_time_pre_op:
            log.info("Service is running without crash")
        else:
            raise TestExecError("Service got crashed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # List multipart uploads with tenant1_user2 user with bucket t1_u1_bucket1
    multipart_object1 = rgw_tenant1_user2_c.list_multipart_uploads(
        Bucket=t1_u1_bucket1.name)
    log.info("Multipart object %s" % multipart_object1)
    # Verify tenant1_user2 not having permission for listing multipart
    # uploads in t1_u1_bucket2; the raised Exception is not a ClientError,
    # so it propagates past the except clause and fails the test
    try:
        multipart_object2 = rgw_tenant1_user2_c.list_multipart_uploads(
            Bucket=t1_u1_bucket2.name)
        raise Exception(
            "%s user should not list multipart uploads in bucket: %s" %
            (tenant1_user2_info["user_id"], t1_u1_bucket2.name))
    except ClientError as err:
        log.info("Listing failed as expected with exception: %s" % err)
    # check sync status if a multisite cluster
    reusable.check_sync_status()
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")