def initialize_verify_io(self): log.info("***************Starting Verification*****************") data = self.file_op.get_data() rgw_user_info = data["users"][0] log.info("verifying data for the user: \n") auth = Auth(rgw_user_info) self.rgw_conn = auth.do_auth() self.rgw_conn2 = auth.do_auth_using_client() self.io = rgw_user_info["io"] for each_io in self.io: if each_io["s3_convention"] == "bucket": self.buckets.append(each_io["name"]) if each_io["s3_convention"] == "object": temp = { "name": each_io["name"], "md5": each_io["md5"], "bucket": each_io["bucket"], "type": each_io["type"], } self.objects.append(temp) log.info("buckets:\n%s" % self.buckets) for object in self.objects: log.info("object: %s" % object) log.info("verification of buckets starting")
def test_exec(config):
    test_info = AddTestInfo('Test Byte range')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user, ssl=config.ssl)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=1)
                bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user)
                # uploading data
                log.info('s3 objects to create: %s' % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                log.info('testing for negative range')
                response = rgw_conn2.get_object(Bucket=bucket.name, Key=s3_object_name, Range='-2--1')
                log.info('response: %s\n' % response)
                log.info('Content-Length: %s' % response['ContentLength'])
                log.info('s3_object_size: %s' % (config.obj_size * 1024 * 1024))
                if response['ContentLength'] != config.obj_size * 1024 * 1024:
                    raise TestExecError("Content Length not matched")
                log.info('testing for one positive and one negative range')
                response = rgw_conn2.get_object(Bucket=bucket.name, Key=s3_object_name, Range='-1-3')
                log.info('response: %s\n' % response)
                log.info('Content-Length: %s' % response['ContentLength'])
                log.info('s3_object_size: %s' % (config.obj_size * 1024 * 1024))
                if response['ContentLength'] != config.obj_size * 1024 * 1024:
                    raise TestExecError("Content Length not matched")
        test_info.success_status('test passed')
        sys.exit(0)
    # catch the specific test error first; a bare `except Exception` placed
    # before it would make this handler unreachable
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

def test_exec(config): test_info = AddTestInfo("Test Byte range") io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) test_info.started_info() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() # create buckets log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=1 ) bucket = reusable.create_bucket(bucket_name, rgw_conn, each_user) # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc, size in config.mapped_sizes.items(): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user ) log.info("testing for negative range") response = rgw_conn2.get_object( Bucket=bucket.name, Key=s3_object_name, Range="-2--1" ) log.info("response: %s\n" % response) log.info("Content-Lenght: %s" % response["ContentLength"]) log.info("s3_object_size: %s" % (config.obj_size * 1024 * 1024)) if response["ContentLength"] != config.obj_size * 1024 * 1024: TestExecError("Content Lenght not matched") log.info("testing for one positive and one negative range") response = rgw_conn2.get_object( Bucket=bucket.name, Key=s3_object_name, Range="-1-3" ) log.info("response: %s\n" % response) log.info("Content-Length: %s" % response["ContentLength"]) log.info("s3_object_size: %s" % (config.obj_size * 1024 * 1024)) if response["ContentLength"] != config.obj_size * 1024 * 1024: TestExecError("Content Lenght not matched")
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create user
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count
    )
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count
    )
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1
    )
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2
    )
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj": bucket_policy_obj,
        "resource": "put",
        "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError("Resource execution failed: bucket policy put failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying bucket policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError("Resource execution failed: bucket policy put failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name
        )
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0]["Action"]
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy)
        )
        log.info(
            "cleaned_actions_list_from_modified_policy: %s"
            % cleaned_actions_list_from_modified_policy
        )
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError("Resource execution failed: bucket policy put failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("new bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None,
        })
        if delete_policy is False:
            raise TestExecError("Resource execution failed: bucket policy delete failed")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)
    # check sync status if a multisite cluster
    reusable.check_sync_status()
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

def test_exec(config): test_info = AddTestInfo("create m buckets with n objects with bucket life cycle") io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) try: test_info.started_info() # create user all_users_info = s3lib.create_users(config.user_count, config.cluster_name) for each_user in all_users_info: # authenticate auth = Auth(each_user) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() # create buckets if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=1 ) bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user) if config.test_ops["create_object"] is True: # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc in range(config.objects_count): s3_object_name = utils.gen_s3_object_name(bucket.name, oc) resuables.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) bucket_life_cycle = s3lib.resource_op( { "obj": rgw_conn, "resource": "BucketLifecycleConfiguration", "args": [bucket.name], } ) life_cycle = basic_lifecycle_config( prefix="key", days=20, id="rul1" ) put_bucket_life_cycle = s3lib.resource_op( { "obj": bucket_life_cycle, "resource": "put", "kwargs": dict(LifecycleConfiguration=life_cycle), } ) log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle) if put_bucket_life_cycle is False: raise TestExecError( "Resource execution failed: bucket creation faield" ) if put_bucket_life_cycle is not None: response = HttpResponseParser(put_bucket_life_cycle) if response.status_code == 200: log.info("bucket life cycle added") else: raise TestExecError("bucket lifecycle addition failed") else: raise TestExecError("bucket lifecycle addition failed") log.info("trying to retrieve bucket lifecycle config") get_bucket_life_cycle_config = s3lib.resource_op( { "obj": rgw_conn2, "resource": "get_bucket_lifecycle_configuration", "kwargs": dict(Bucket=bucket.name), } ) if get_bucket_life_cycle_config is False: raise TestExecError("bucket lifecycle config retrieval failed") if get_bucket_life_cycle_config is not None: response = HttpResponseParser(get_bucket_life_cycle_config) if response.status_code == 200: log.info("bucket life cycle retrieved") else: raise TestExecError( "bucket lifecycle config retrieval failed" ) else: raise TestExecError("bucket life cycle retrieved") test_info.success_status("test passed") sys.exit(0) except Exception as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1) except TestExecError as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1)
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf(
        "global", ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval)
    )
    _, version_name = utils.get_ceph_version()
    if "nautilus" in version_name:
        ceph_conf.set_to_ceph_conf(
            "global", ConfigOpts.rgw_lc_max_worker, str(config.rgw_lc_max_worker)
        )
    else:
        ceph_conf.set_to_ceph_conf(
            section=None,
            option=ConfigOpts.rgw_lc_max_worker,
            value=str(config.rgw_lc_max_worker),
        )
        ceph_conf.set_to_ceph_conf(
            section=None, option=ConfigOpts.rgw_lc_debug_interval, value="30"
        )
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")
    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=1)
    obj_list = []
    obj_tag = "suffix1=WMV1"
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    prefix = [
        rule["Filter"].get("Prefix") or rule["Filter"]["And"].get("Prefix")
        for rule in config.lifecycle_conf
    ]
    prefix = prefix if prefix else ["dummy1"]
    if config.test_ops["enable_versioning"] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info)
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops["version_count"] > 0:
                    for vc in range(config.test_ops["version_count"]):
                        log.info(
                            "version count for %s is %s" % (s3_object_name, str(vc))
                        )
                        log.info("modifying data: %s" % s3_object_name)
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            user_info,
                            append_data=True,
                            append_msg="hello object for version: %s\n" % str(vc),
                        )
                else:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    reusable.upload_object(
                        s3_object_name, bucket, TEST_DATA_PATH, config, user_info
                    )
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(
            bucket, rgw_conn, rgw_conn2, life_cycle_rule, config
        )
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops["delete_marker"] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(
                bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config
            )
    if config.test_ops["enable_versioning"] is False:
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(
                    s3_object_name, bucket, TEST_DATA_PATH, config, user_info, obj_tag
                )
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(
            bucket, rgw_conn, rgw_conn2, life_cycle_rule, config
        )
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=1
                )
                bucket = reusable.create_bucket(bucket_name, rgw_conn, each_user)
                if config.test_ops["enable_versioning"] is True:
                    log.info("bucket versioning test on bucket: %s" % bucket.name)
                    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                    bucket_versioning = s3lib.resource_op({
                        "obj": rgw_conn,
                        "resource": "BucketVersioning",
                        "args": [bucket.name],
                    })
                    version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "status",
                        "args": None,
                    })
                    if version_status is None:
                        log.info("bucket versioning still not enabled")
                    # enabling bucket versioning
                    version_enable_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "enable",
                        "args": None,
                    })
                    response = HttpResponseParser(version_enable_status)
                    if response.status_code == 200:
                        log.info("version enabled")
                    else:
                        raise TestExecError("version enable failed")
                if config.test_ops["create_object"] is True:
                    # upload data
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        if config.test_ops["version_count"] > 0:
                            for vc in range(config.test_ops["version_count"]):
                                log.info(
                                    "version count for %s is %s"
                                    % (s3_object_name, str(vc))
                                )
                                log.info("modifying data: %s" % s3_object_name)
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                    append_data=True,
                                    append_msg="hello object for version: %s\n"
                                    % str(vc),
                                )
                        else:
                            log.info(
                                "s3 objects to create: %s" % config.objects_count
                            )
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                bucket_life_cycle = s3lib.resource_op({
                    "obj": rgw_conn,
                    "resource": "BucketLifecycleConfiguration",
                    "args": [bucket.name],
                })
                life_cycle = basic_lifecycle_config(prefix="key", days=20, id="rul1")
                put_bucket_life_cycle = s3lib.resource_op({
                    "obj": bucket_life_cycle,
                    "resource": "put",
                    "kwargs": dict(LifecycleConfiguration=life_cycle),
                })
                log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle)
                if put_bucket_life_cycle is False:
                    raise TestExecError(
                        "Resource execution failed: put bucket lifecycle failed"
                    )
                if put_bucket_life_cycle is not None:
                    response = HttpResponseParser(put_bucket_life_cycle)
                    if response.status_code == 200:
                        log.info("bucket life cycle added")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                else:
                    raise TestExecError("bucket lifecycle addition failed")
                log.info("trying to retrieve bucket lifecycle config")
                get_bucket_life_cycle_config = s3lib.resource_op({
                    "obj": rgw_conn2,
                    "resource": "get_bucket_lifecycle_configuration",
                    "kwargs": dict(Bucket=bucket.name),
                })
                if get_bucket_life_cycle_config is False:
                    raise TestExecError("bucket lifecycle config retrieval failed")
                if get_bucket_life_cycle_config is not None:
                    response = HttpResponseParser(get_bucket_life_cycle_config)
                    if response.status_code == 200:
                        log.info("bucket life cycle retrieved")
                    else:
                        raise TestExecError("bucket lifecycle config retrieval failed")
                else:
                    raise TestExecError("bucket lifecycle config retrieval failed")
                if config.test_ops["create_object"] is True:
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        if config.test_ops["version_count"] > 0:
                            if (
                                config.test_ops.get("delete_versioned_object", None)
                                is True
                            ):
                                log.info(
                                    "list all the versions of the object and delete the "
                                    "current version of the object"
                                )
                                log.info(
                                    "all versions for the object: %s\n" % s3_object_name
                                )
                                versions = bucket.object_versions.filter(
                                    Prefix=s3_object_name
                                )
                                t1 = []
                                for version in versions:
                                    log.info(
                                        "key_name: %s --> version_id: %s"
                                        % (version.object_key, version.version_id)
                                    )
                                    t1.append(version.version_id)
                                s3_object = s3lib.resource_op({
                                    "obj": rgw_conn,
                                    "resource": "Object",
                                    "args": [bucket.name, s3_object_name],
                                })
                                # log.info('object version to delete: %s -> %s' % (versions[0].object_key,
                                #          versions[0].version_id))
                                delete_response = s3_object.delete()
                                log.info("delete response: %s" % delete_response)
                                if delete_response["DeleteMarker"] is True:
                                    log.info("object delete marker is set to true")
                                else:
                                    raise TestExecError(
                                        "object delete marker is set to false"
                                    )
                                log.info(
                                    "available versions for the object after delete marker is set"
                                )
                                t2 = []
                                versions_after_delete_marker_is_set = (
                                    bucket.object_versions.filter(
                                        Prefix=s3_object_name
                                    )
                                )
                                for version in versions_after_delete_marker_is_set:
                                    log.info(
                                        "key_name: %s --> version_id: %s"
                                        % (version.object_key, version.version_id)
                                    )
                                    t2.append(version.version_id)
                                t2.pop()
                                if t1 == t2:
                                    log.info("versions remained intact")
                                else:
                                    raise TestExecError(
                                        "versions are not intact after delete marker is set"
                                    )
                # modify bucket lifecycle configuration, modify expiration days here for the test case.
                if config.test_ops.get("modify_lifecycle", False) is True:
                    log.info("modifying lifecycle configuration")
                    life_cycle_modified = basic_lifecycle_config(
                        prefix="key", days=15, id="rul1", status="Disabled"
                    )
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj": bucket_life_cycle,
                        "resource": "put",
                        "kwargs": dict(LifecycleConfiguration=life_cycle_modified),
                    })
                    log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info("bucket life cycle added")
                        else:
                            raise TestExecError("bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info("trying to retrieve bucket lifecycle config")
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj": rgw_conn2,
                        "resource": "get_bucket_lifecycle_configuration",
                        "kwargs": dict(Bucket=bucket.name),
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError("bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(get_bucket_life_cycle_config)
                        modified_expiration_days = get_bucket_life_cycle_config[
                            "Rules"
                        ][0]["Expiration"]["Days"]
                        log.info(
                            "modified expiration days: %s" % modified_expiration_days
                        )
                        if (
                            response.status_code == 200
                            and modified_expiration_days == 15
                        ):
                            log.info("bucket life cycle retrieved after modifying")
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after modifying"
                            )
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed after modifying"
                        )
                # disable bucket lifecycle configuration
                if config.test_ops.get("disable_lifecycle", False) is True:
                    log.info("disabling lifecycle configuration")
                    life_cycle_disabled_config = basic_lifecycle_config(
                        prefix="key", days=20, id="rul1", status="Disabled"
                    )
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj": bucket_life_cycle,
                        "resource": "put",
                        "kwargs": dict(
                            LifecycleConfiguration=life_cycle_disabled_config
                        ),
                    })
                    log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info("bucket life cycle added")
                        else:
                            raise TestExecError("bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info("trying to retrieve bucket lifecycle config")
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj": rgw_conn2,
                        "resource": "get_bucket_lifecycle_configuration",
                        "kwargs": dict(Bucket=bucket.name),
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError("bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(get_bucket_life_cycle_config)
                        if (
                            response.status_code == 200
                            and get_bucket_life_cycle_config["Rules"][0]["Status"]
                            == "Disabled"
                        ):
                            log.info(
                                "disabled_status: %s"
                                % get_bucket_life_cycle_config["Rules"][0]["Status"]
                            )
                            log.info("bucket life cycle retrieved after disabled")
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after disabled"
                            )
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed after disabled"
                        )
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()
    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")
    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")
    # Adding caps for user1
    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)
    # user1 auth with iam_client
    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()
    # policy document
    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])
    print(policy_document)
    # role policy
    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")
    print(role_policy)
    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")
    # role creation happens here
    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)
    # Put role policy happening here
    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")
    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )
    log.info("put_policy_response")
    log.info(put_policy_response)
    # bucket creation operations now
    bucket_name = "testbucket" + user1["user_id"]
    # authenticating user1 for bucket creation operation
    auth = Auth(user1, ssl=config.ssl)
    user1_info = {
        "access_key": user1["access_key"],
        "secret_key": user1["secret_key"],
        "user_id": user1["user_id"],
    }
    s3_client_u1 = auth.do_auth()
    # bucket creation operation
    bucket = reusable.create_bucket(bucket_name, s3_client_u1, user1_info)
    # uploading objects to the bucket
    if config.test_ops["create_object"]:
        # uploading data
        log.info("s3 objects to create: %s" % config.objects_count)
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
            log.info("s3 object name: %s" % s3_object_name)
            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
            log.info("s3 object path: %s" % s3_object_path)
            if config.test_ops.get("upload_type") == "multipart":
                log.info("upload type: multipart")
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )
            else:
                log.info("upload type: normal")
                reusable.upload_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )
    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()
    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )
    log.info(assume_role_response)
    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }
    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client = s3client.do_auth_using_client()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)
    unexisting_object = bucket_name + "_unexisting_object"
    try:
        response = s3_client.head_object(Bucket=bucket_name, Key=unexisting_object)
    except botocore.exceptions.ClientError as e:
        response_code = e.response["Error"]["Code"]
        log.info(response_code)
        if e.response["Error"]["Code"] == "404":
            log.info("404 Unexisting Object Not Found")
        elif e.response["Error"]["Code"] == "403":
            raise TestExecError("Error code : 403 - HeadObject operation: Forbidden")

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create user
    config.user_count = 1
    tenant1 = 'MountEverest'
    tenant2 = 'Himalayas'
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info['user_id'], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info['user_id'], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info['user_id']],
        actions_list=['CreateBucket'],
        resources=[t1_u1_bucket1.name])
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info('jsoned policy:%s\n' % bucket_policy)
    log.info('bucket_policy_generated:%s\n' % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        'obj': rgw_tenant1_user1,
        'resource': 'BucketPolicy',
        'args': [t1_u1_bucket1.name]
    })
    put_policy = s3lib.resource_op({
        'obj': bucket_policy_obj,
        'resource': 'put',
        'kwargs': dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy)
    })
    log.info('put policy response:%s\n' % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket policy put failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200:
            log.info('bucket policy created')
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info('got bucket policy:%s\n' % get_policy['Policy'])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == 'modify':
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info('modifying bucket policy')
        actions_list = ['ListBucket', 'CreateBucket']
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info['user_id']],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name])
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            'obj': bucket_policy_obj,
            'resource': 'put',
            'kwargs': dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2)
        })
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket policy put failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                log.info('bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy['Policy'])
        log.info('got bucket policy:%s\n' % modified_policy)
        actions_list_from_modified_policy = modified_policy['Statement'][0][
            'Action']
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info('cleaned_actions_list_from_modified_policy: %s' %
                 cleaned_actions_list_from_modified_policy)
        log.info('actions list to be modified: %s' % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info('cmp_val: %s' % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed")
    if config.bucket_policy_op == 'replace':
        log.info('replacing new bucket policy')
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info['user_id']],
            actions_list=['ListBucket'],
            resources=[t1_u1_bucket2.name])
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            'obj': bucket_policy_obj,
            'resource': 'put',
            'kwargs': dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy)
        })
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket policy put failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                log.info('new bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == 'delete':
        log.info('in delete bucket policy')
        delete_policy = s3lib.resource_op({
            'obj': bucket_policy_obj,
            'resource': 'delete',
            'args': None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket policy delete failed")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200:
                log.info('bucket policy deleted')
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'NoSuchBucketPolicy':
                log.info('bucket policy deleted')
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authentication
        auth = Auth(each_user, ssl=config.ssl)
        s3_conn_client = auth.do_auth_using_client()
        # create buckets with object lock configuration
        if config.test_ops["create_bucket"] is True:
            log.info(f"no of buckets to create: {config.bucket_count}")
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                log.info(f"creating bucket with name: {bucket_name_to_create}")
                rgw_ip_and_port = get_rgw_ip_and_port()
                s3_conn_client.create_bucket(
                    Bucket=bucket_name_to_create, ObjectLockEnabledForBucket=True
                )
                # put object lock configuration for bucket
                s3_conn_client.put_object_lock_configuration(
                    Bucket=bucket_name_to_create,
                    ObjectLockConfiguration={
                        "ObjectLockEnabled": "Enabled",
                        "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}},
                    },
                )
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info(f"s3 objects to create: {config.objects_count}")
                    for oc, size in list(config.mapped_sizes.items()):
                        # the same key (index 0) is uploaded repeatedly so each
                        # upload creates a new version of one object
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, 0
                        )
                        log.info(f"s3 object name: {s3_object_name}")
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info(f"s3 object path: {s3_object_path}")
                        log.info("upload type: normal")
                        io_generator(TEST_DATA_PATH + "/" + s3_object_name, size)
                        s3_conn_client.put_object(
                            Body=TEST_DATA_PATH + "/" + s3_object_name,
                            Bucket=bucket_name_to_create,
                            Key=s3_object_name,
                        )
                    log.info("Verify version count")
                    # Verify version count
                    versions = s3_conn_client.list_object_versions(
                        Bucket=bucket_name_to_create
                    )
                    versions_count = len(versions["Versions"])
                    error_message = (
                        f"Expected: {config.objects_count}, Actual: {versions_count}"
                    )
                    if versions_count == config.objects_count:
                        log.info("Expected and actual version count is same")
                    else:
                        raise ObjectVersionCountMismatch(error_message)
                    # Verify delete disabled for object
                    log.info("Verify delete disabled for object")
                    for version_dict in versions["Versions"]:
                        try:
                            s3_conn_client.delete_object(
                                Bucket=bucket_name_to_create,
                                Key=s3_object_name,
                                VersionId=version_dict["VersionId"],
                            )
                            raise AccessDeniedObjectDeleted(
                                "Access denied object deleted"
                            )
                        except boto3exception.ClientError as e:
                            expected_code = "AccessDenied"
                            actual_code = e.response["Error"]["Code"]
                            assert (
                                actual_code == expected_code
                            ), f"Expected: {expected_code}, Actual: {actual_code}"
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info('making changes to ceph.conf')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    ceph_version = utils.exec_shell_cmd("ceph version")
    op = ceph_version.split()
    for i in op:
        if i == 'nautilus':
            ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_max_worker,
                                       str(config.rgw_lc_max_worker))
    log.info('trying to restart services')
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')
    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1)
    obj_list = []
    obj_tag = 'suffix1=WMV1'
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    prefix = [rule['Filter'].get('Prefix') or rule['Filter']['And'].get('Prefix')
              for rule in config.lifecycle_conf]
    prefix = prefix if prefix else ['dummy1']
    if config.test_ops['enable_versioning'] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info)
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops['version_count'] > 0:
                    for vc in range(config.test_ops['version_count']):
                        log.info('version count for %s is %s' % (s3_object_name, str(vc)))
                        log.info('modifying data: %s' % s3_object_name)
                        reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info,
                                               append_data=True,
                                               append_msg='hello object for version: %s\n' % str(vc))
                else:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops['delete_marker'] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config)
    if config.test_ops['enable_versioning'] is False:
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket, TEST_DATA_PATH, config, user_info,
                                                    obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info('no of buckets to create: %s' % config.bucket_count)
    # create buckets
    if config.test_ops['create_bucket'] is True:
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, user_info)
            if config.test_ops['create_object'] is True:
                # uploading data
                log.info('s3 objects to create: %s' % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, oc)
                    log.info('s3 object name: %s' % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    log.info('s3 object path: %s' % s3_object_path)
                    if config.test_ops.get('upload_type') == 'multipart':
                        log.info('upload type: multipart')
                        reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                                        user_info)
                    else:
                        log.info('upload type: normal')
                        reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)
            if config.gc_verification is True:
                log.info('making changes to ceph.conf')
                config.rgw_gc_obj_min_wait = 5
                ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,
                                           str(config.rgw_gc_obj_min_wait))
                log.info('trying to restart services')
                srv_restarted = rgw_service.restart()
                time.sleep(30)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')
                log.info('download the large object again to populate gc list with shadow entries')
                reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config)
                time.sleep(60)
                gc_list_output = json.loads(utils.exec_shell_cmd("radosgw-admin gc list --include-all"))
                log.info(gc_list_output)
                if gc_list_output:
                    log.info("Shadow objects found after setting the rgw_gc_obj_min_wait to 5 seconds")
                    utils.exec_shell_cmd("radosgw-admin gc process")
                    log.info('Object download should not error out in 404 NoSuchKey error')
                    reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config)
    reusable.remove_user(user_info)

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    if config.test_ops.get("upload_type") == "multipart":
        srv_time_pre_op = get_svc_time()
    # create user
    tenant1 = "tenant_" + random.choice(string.ascii_letters)
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=2
    )
    tenant1_user1_info = tenant1_user_info[0]
    tenant1_user2_info = tenant1_user_info[1]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant1_user2_auth = Auth(tenant1_user2_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant1_user2 = tenant1_user2_auth.do_auth()
    rgw_tenant1_user2_c = tenant1_user2_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1
    )
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2
    )
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant1_user2_info["user_id"]],
        actions_list=["ListBucketMultiPartUploads"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj": bucket_policy_obj,
        "resource": "put",
        "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError("Resource execution failed: bucket policy put failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    if config.test_ops.get("upload_type") == "multipart":
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            for bucket in [t1_u1_bucket1, t1_u1_bucket2]:
                s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                log.info("s3 objects to create: %s" % config.objects_count)
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    tenant1_user1_info,
                )
        srv_time_post_op = get_svc_time()
        log.info(srv_time_pre_op)
        log.info(srv_time_post_op)
        if srv_time_post_op > srv_time_pre_op:
            log.info("Service is running without crash")
        else:
            raise TestExecError("Service got crashed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # List multipart uploads with tenant1_user2 user with bucket t1_u1_bucket1
    multipart_object1 = rgw_tenant1_user2_c.list_multipart_uploads(
        Bucket=t1_u1_bucket1.name
    )
    log.info("Multipart object %s" % multipart_object1)
    # Verify tenant1_user2 not having permission for listing multipart uploads in t1_u1_bucket2
    try:
        multipart_object2 = rgw_tenant1_user2_c.list_multipart_uploads(
            Bucket=t1_u1_bucket2.name
        )
        raise Exception(
            "%s user should not list multipart uploads in bucket: %s"
            % (tenant1_user2_info["user_id"], t1_u1_bucket2.name)
        )
    except ClientError as err:
        log.info("Listing failed as expected with exception: %s" % err)
    # check sync status if a multisite cluster
    reusable.check_sync_status()
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

def test_exec(config):
    test_info = AddTestInfo('create m buckets with n objects')
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth_using_client()
            rgw = auth.do_auth()
            bucket_list = []
            buckets = rgw_conn.list_buckets()
            log.info('buckets are %s' % buckets)
            for each_bucket in buckets['Buckets']:
                bucket_list.append(each_bucket['Name'])
            for bucket_name in bucket_list:
                # create 'bucket' resource object
                bucket = rgw.Bucket(bucket_name)
                log.info('In bucket: %s' % bucket_name)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                                             config.objects_size_range['max'])
                        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                        if data_info is False:
                            raise TestExecError("data creation failed")
                        log.info('uploading s3 object: %s' % s3_object_path)
                        upload_info = dict({'access_key': each_user['access_key']}, **data_info)
                        # object_uploaded_status = bucket.upload_file(s3_object_path, s3_object_name)
                        object_uploaded_status = s3lib.resource_op({'obj': bucket,
                                                                    'resource': 'upload_file',
                                                                    'args': [s3_object_path, s3_object_name],
                                                                    'extra_info': upload_info})
                        if object_uploaded_status is False:
                            raise TestExecError("Resource execution failed: object upload failed")
                        if object_uploaded_status is None:
                            log.info('object uploaded')
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' % s3_object_download_path)
                            log.info('downloading to filename: %s' % s3_object_download_name)
                            # object_downloaded_status = bucket.download_file(s3_object_path, s3_object_name)
                            object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                          'resource': 'download_file',
                                                                          'args': [s3_object_name,
                                                                                   s3_object_download_path],
                                                                          })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                if config.test_ops['delete_bucket_object'] is True:
                    log.info('listing all objects in bucket: %s' % bucket.name)
                    # objects = s3_ops.resource_op(bucket, 'objects', None)
                    objects = s3lib.resource_op({'obj': bucket,
                                                 'resource': 'objects',
                                                 'args': None})
                    log.info('objects :%s' % objects)
                    # all_objects = s3_ops.resource_op(objects, 'all')
                    all_objects = s3lib.resource_op({'obj': objects,
                                                     'resource': 'all',
                                                     'args': None})
                    log.info('all objects: %s' % all_objects)
                    for obj in all_objects:
                        log.info('object_name: %s' % obj.key)
                    log.info('deleting all objects in bucket')
                    # objects_deleted = s3_ops.resource_op(objects, 'delete')
                    objects_deleted = s3lib.resource_op({'obj': objects,
                                                         'resource': 'delete',
                                                         'args': None})
                    log.info('objects_deleted: %s' % objects_deleted)
                    if objects_deleted is False:
                        raise TestExecError('Resource execution failed: Object deletion failed')
                    if objects_deleted is not None:
                        response = HttpResponseParser(objects_deleted[0])
                        if response.status_code == 200:
                            log.info('objects deleted')
                        else:
                            raise TestExecError("objects deletion failed")
                    else:
                        raise TestExecError("objects deletion failed")
                    # wait for object delete info to sync
                    time.sleep(60)
                    log.info('deleting bucket: %s' % bucket.name)
                    # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                    bucket_deleted_status = s3lib.resource_op({'obj': bucket,
                                                               'resource': 'delete',
                                                               'args': None})
                    log.info('bucket_deleted_status: %s' % bucket_deleted_status)
                    if bucket_deleted_status is not None:
                        response = HttpResponseParser(bucket_deleted_status)
                        if response.status_code == 204:
                            log.info('bucket deleted')
                        else:
                            raise TestExecError("bucket deletion failed")
                    else:
                        raise TestExecError("bucket deletion failed")
        test_info.success_status('test passed')
        sys.exit(0)
    # catch the specific test error first so its handler is reachable
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create user
    user_info = s3lib.create_users(config.user_count)
    for each_user in user_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        buckets = []
        buckets_meta = []
        if config.test_ops["create_bucket"]:
            log.info("no of buckets to create: %s" % config.bucket_count)
            # create bucket
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                bucket = reusable.create_bucket(bucket_name, rgw_conn, each_user)
                buckets.append(bucket_name)
                buckets_meta.append(bucket)
                if config.test_ops["create_object"]:
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        log.info(f"s3 objects to create of size {config.obj_size}")
                        s3_object_name = config.lifecycle_conf[0]["Filter"][
                            "Prefix"
                        ] + str(oc)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info(
                            f"s3 object path: {s3_object_path}, name: {s3_object_name}"
                        )
                        reusable.upload_object(
                            s3_object_name, bucket, TEST_DATA_PATH, config, each_user
                        )
                life_cycle_rule = {"Rules": config.lifecycle_conf}
                reusable.put_bucket_lifecycle(
                    bucket, rgw_conn, rgw_conn2, life_cycle_rule
                )
        log.info(f"buckets are {buckets}")
        for bkt in buckets:
            bucket_details = json.loads(
                utils.exec_shell_cmd(f"radosgw-admin bucket stats --bucket={bkt}")
            )
            num_objects = bucket_details["usage"]["rgw.main"]["num_objects"]
            log.info(f"objects count in bucket {bkt} is {num_objects}")
        lc_list_op_before = json.loads(utils.exec_shell_cmd("radosgw-admin lc list"))
        log.info(f"lc lists before lc process is {lc_list_op_before}")
        utils.exec_shell_cmd(f"radosgw-admin lc process --bucket {buckets[0]}")
        time.sleep(60)
        lc_list_op_after = json.loads(utils.exec_shell_cmd("radosgw-admin lc list"))
        log.info(f"lc lists after lc process is {lc_list_op_after}")
        completed_bucket = 0
        completed_bkt_name = ""
        for data in lc_list_op_after:
            if data["status"] == "COMPLETE":
                completed_bucket += 1
                completed_bkt_name = data["bucket"]
        bucket_details = json.loads(
            utils.exec_shell_cmd(f"radosgw-admin bucket stats --bucket={buckets[0]}")
        )
        num_objects_after = bucket_details["usage"]["rgw.main"]["num_objects"]
        if config.object_expire:
            if (
                completed_bucket == 1
                and (buckets[0] in completed_bkt_name)
                and num_objects_after == 0
            ):
                log.info(f"processing of single bucket:{buckets[0]} succeeded")
            else:
                raise TestExecError("LC processing of a single bucket failed")
        else:
            if (
                completed_bucket == 1
                and (buckets[0] in completed_bkt_name)
                and num_objects_after == config.objects_count
            ):
                log.info("Successfully completed, non-expired objects were not deleted")
            else:
                raise TestExecError(
                    "Failed! removed non-expired objects from the bucket"
                )
        delete_conf = config.lifecycle_conf[0]
        delete_conf["Status"] = "Disabled"
        for bkt in buckets_meta:
            life_cycle_rule_delete = {"Rules": [delete_conf]}
            reusable.put_bucket_lifecycle(
                bkt, rgw_conn, rgw_conn2, life_cycle_rule_delete
            )

def test_exec(config): test_info = AddTestInfo( 'create m buckets with n objects with bucket life cycle') io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) try: test_info.started_info() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() # create buckets if config.test_ops['create_bucket'] is True: log.info('no of buckets to create: %s' % config.bucket_count) for bc in range(config.bucket_count): # use bc in the suffix so each bucket gets a unique name bucket_name = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc) bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user) if config.test_ops['enable_versioning'] is True: log.info('bucket versioning test on bucket: %s' % bucket.name) # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name) bucket_versioning = s3lib.resource_op({'obj': rgw_conn, 'resource': 'BucketVersioning', 'args': [bucket.name]}) version_status = s3lib.resource_op({'obj': bucket_versioning, 'resource': 'status', 'args': None}) if version_status is None: log.info('bucket versioning still not enabled') # enabling bucket versioning version_enable_status = s3lib.resource_op({'obj': bucket_versioning, 'resource': 'enable', 'args': None}) response = HttpResponseParser(version_enable_status) if response.status_code == 200: log.info('version enabled') else: raise TestExecError("version enable failed") if config.test_ops['create_object'] is True: # upload data for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) if config.test_ops['version_count'] > 0: for vc in range(config.test_ops['version_count']): log.info('version count for %s is %s' % (s3_object_name, str(vc))) log.info('modifying data: %s' % s3_object_name) resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user, append_data=True, append_msg='hello object for version: %s\n' % str(vc)) else: log.info('s3 objects to create: %s' % config.objects_count) resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) bucket_life_cycle = s3lib.resource_op({'obj': rgw_conn, 'resource': 'BucketLifecycleConfiguration', 'args': [bucket.name]}) life_cycle = basic_lifecycle_config(prefix="key", days=20, id="rul1") put_bucket_life_cycle = s3lib.resource_op({"obj": bucket_life_cycle, "resource": "put", "kwargs": dict(LifecycleConfiguration=life_cycle)}) log.info('put bucket life cycle:\n%s' % put_bucket_life_cycle) if put_bucket_life_cycle is False: raise TestExecError("Resource execution failed: put bucket lifecycle failed") if put_bucket_life_cycle is not None: response = HttpResponseParser(put_bucket_life_cycle) if response.status_code == 200: log.info('bucket life cycle added') else: raise TestExecError("bucket lifecycle addition failed") else: raise TestExecError("bucket lifecycle addition failed") log.info('trying to retrieve bucket lifecycle config') get_bucket_life_cycle_config = s3lib.resource_op({"obj": rgw_conn2, "resource": 'get_bucket_lifecycle_configuration', "kwargs": dict(Bucket=bucket.name)}) if get_bucket_life_cycle_config is False: raise TestExecError("bucket lifecycle config retrieval failed") if get_bucket_life_cycle_config is not None: response = HttpResponseParser(get_bucket_life_cycle_config) if response.status_code == 200: log.info('bucket life cycle retrieved') else: raise TestExecError("bucket lifecycle config retrieval failed") else: raise TestExecError("bucket lifecycle config retrieval failed") if config.test_ops['create_object'] is True: for oc in range(config.objects_count): s3_object_name = utils.gen_s3_object_name(bucket.name, oc) if config.test_ops['version_count'] > 0: if config.test_ops.get('delete_versioned_object', None) is True: log.info('list all the versions of the object and delete the current version of the object') log.info('all versions for the object: %s\n' % s3_object_name) versions = bucket.object_versions.filter(Prefix=s3_object_name) t1 = [] for version in versions: log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id)) t1.append(version.version_id) s3_object = s3lib.resource_op({'obj': rgw_conn, 'resource': 'Object', 'args': [bucket.name, s3_object_name]}) # log.info('object version to delete: %s -> %s' % (versions[0].object_key, versions[0].version_id)) delete_response = s3_object.delete() log.info('delete response: %s' % delete_response) if delete_response['DeleteMarker'] is True: log.info('object delete marker is set to true') else: raise TestExecError("object delete marker is set to false") log.info('available versions for the object after delete marker is set') t2 = [] versions_after_delete_marker_is_set = bucket.object_versions.filter(Prefix=s3_object_name) for version in versions_after_delete_marker_is_set: log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id)) t2.append(version.version_id) # versions are listed latest first, so drop the newly created delete marker before comparing t2.pop(0) if t1 == t2: log.info('versions remained intact') else: raise TestExecError('versions are not intact after delete marker is set') # modify bucket lifecycle configuration, modify expiration days here for the test case.
if config.test_ops.get('modify_lifecycle', False) is True: log.info('modifying lifecycle configuration') life_cycle_modified = basic_lifecycle_config(prefix="key", days=15, id="rul1", status="Disabled") put_bucket_life_cycle = s3lib.resource_op({"obj": bucket_life_cycle, "resource": "put", "kwargs": dict(LifecycleConfiguration=life_cycle_modified)}) log.info('put bucket life cycle:\n%s' % put_bucket_life_cycle) if put_bucket_life_cycle is False: raise TestExecError("Resource execution failed: put bucket lifecycle failed") if put_bucket_life_cycle is not None: response = HttpResponseParser(put_bucket_life_cycle) if response.status_code == 200: log.info('modified bucket life cycle applied') else: raise TestExecError("bucket lifecycle modification failed") else: raise TestExecError("bucket lifecycle modification failed") log.info('trying to retrieve bucket lifecycle config') get_bucket_life_cycle_config = s3lib.resource_op({"obj": rgw_conn2, "resource": 'get_bucket_lifecycle_configuration', "kwargs": dict(Bucket=bucket.name)}) if get_bucket_life_cycle_config is False: raise TestExecError("bucket lifecycle config retrieval failed") if get_bucket_life_cycle_config is not None: response = HttpResponseParser(get_bucket_life_cycle_config) modified_expiration_days = get_bucket_life_cycle_config['Rules'][0]['Expiration']['Days'] log.info('modified expiration days: %s' % modified_expiration_days) if response.status_code == 200 and modified_expiration_days == 15: log.info('bucket life cycle retrieved after modifying') else: raise TestExecError("bucket lifecycle config retrieval failed after modifying") else: raise TestExecError("bucket lifecycle config retrieval failed after modifying") # disable bucket lifecycle configuration if config.test_ops.get('disable_lifecycle', False) is True: log.info('disabling lifecycle configuration') life_cycle_disabled_config = basic_lifecycle_config(prefix="key", days=20, id="rul1", status="Disabled") put_bucket_life_cycle = s3lib.resource_op({"obj": bucket_life_cycle, "resource": "put", "kwargs": dict(LifecycleConfiguration=life_cycle_disabled_config)}) log.info('put bucket life cycle:\n%s' % put_bucket_life_cycle) if put_bucket_life_cycle is False: raise TestExecError("Resource execution failed: put bucket lifecycle failed") if put_bucket_life_cycle is not None: response = HttpResponseParser(put_bucket_life_cycle) if response.status_code == 200: log.info('disabled bucket life cycle applied') else: raise TestExecError("bucket lifecycle disabling failed") else: raise TestExecError("bucket lifecycle disabling failed") log.info('trying to retrieve bucket lifecycle config') get_bucket_life_cycle_config = s3lib.resource_op({"obj": rgw_conn2, "resource": 'get_bucket_lifecycle_configuration', "kwargs": dict(Bucket=bucket.name)}) if get_bucket_life_cycle_config is False: raise TestExecError("bucket lifecycle config retrieval failed") if get_bucket_life_cycle_config is not None: response = HttpResponseParser(get_bucket_life_cycle_config) if response.status_code == 200 and get_bucket_life_cycle_config['Rules'][0]['Status'] == 'Disabled': log.info('disabled_status: %s' % get_bucket_life_cycle_config['Rules'][0]['Status']) log.info('bucket life cycle retrieved after disabling') else: raise TestExecError("bucket lifecycle config retrieval failed after disabling") else: raise TestExecError("bucket lifecycle config retrieval failed after disabling") test_info.success_status('test passed') sys.exit(0) # catch TestExecError before the generic Exception so its handler is reachable except TestExecError as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status('test failed') sys.exit(1) except Exception as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status('test failed') sys.exit(1)
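# The test above calls basic_lifecycle_config(prefix=..., days=..., id=..., status=...)
# without showing its body. A minimal sketch of what such a helper presumably returns
# (a standard S3 LifecycleConfiguration dict, assuming status defaults to "Enabled");
# the real helper may build the rule differently.
def basic_lifecycle_config_sketch(prefix, days, id, status="Enabled"):
    rule = {
        "ID": id,
        "Filter": {"Prefix": prefix},
        "Status": status,
        "Expiration": {"Days": days},
    }
    return {"Rules": [rule]}

# e.g. basic_lifecycle_config_sketch(prefix="key", days=15, id="rul1", status="Disabled")
# yields the shape the modify/disable assertions above read back via
# get_bucket_lifecycle_configuration.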
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() # authenticate sns client. rgw_sns_conn = auth.do_auth_sns_client() # authenticate with s3 client rgw_s3_client = auth.do_auth_using_client() # get ceph version ceph_version_id, ceph_version_name = utils.get_ceph_version() objects_created_list = [] if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user["user_id"], rand_no=bc) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info) # create topic with endpoint if config.test_ops["create_topic"] is True: endpoint = config.test_ops.get("endpoint") ack_type = config.test_ops.get("ack_type") topic_id = str(uuid.uuid4().hex[:16]) persistent = False topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id log.info(f"creating a topic with {endpoint} endpoint with ack type {ack_type}") if config.test_ops.get("persistent_flag", False): log.info("topic with persistent flag enabled") persistent = config.test_ops.get("persistent_flag") topic = notification.create_topic(rgw_sns_conn, endpoint, ack_type, topic_name, persistent) # get topic attributes if config.test_ops.get("get_topic_info", False): log.info("get topic attributes") get_topic_info = notification.get_topic(rgw_sns_conn, topic, ceph_version_name) # put bucket notification with topic configured for event if config.test_ops["put_get_bucket_notification"] is True: event = config.test_ops.get("event_type") notification_name = "notification-" + str(event) notification.put_bucket_notification(rgw_s3_client, bucket_name_to_create, notification_name, topic, event) # get bucket notification log.info(f"get bucket notification for bucket : {bucket_name_to_create}") notification.get_bucket_notification(rgw_s3_client, bucket_name_to_create) # create objects if config.test_ops["create_object"] is True: # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) else: log.info("upload type: normal") reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) # track created objects so versioned objects can be deleted later if config.test_ops.get("enable_version", False): objects_created_list.append((s3_object_name, s3_object_path)) # copy objects if config.test_ops.get("copy_object", False): log.info("copy object") status = rgw_s3_client.copy_object(Bucket=bucket_name_to_create, Key="copy_of_object" + s3_object_name, CopySource={"Bucket": bucket_name_to_create, "Key": s3_object_name}) if status is None: raise TestExecError("copy object failed") # delete objects if 
config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, each_user) else: reusable.delete_objects(bucket) # start kafka broker and consumer event_record_path = "/home/cephuser/event_record" start_consumer = notification.start_kafka_broker_consumer( topic_name, event_record_path) if start_consumer is False: raise TestExecError("Kafka consumer not running") # verify all the attributes of the event record. if event not received abort testcase log.info("verify event record attributes") verify = notification.verify_event_record(event, bucket_name_to_create, event_record_path, ceph_version_name) if verify is False: raise EventRecordDataError( "Event record is empty! notification is not seen") # delete topic logs on kafka broker notification.del_topic_from_kafka_broker(topic_name) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() config.rgw_lc_debug_interval = 1 config.rgw_lifecycle_work_time = "00:00-23:59" log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval)) log.info("trying to restart services") srv_restarted = rgw_service.restart() if srv_restarted is False: raise TestExecError("RGW service restart failed") rgw_service.status() # create user user_info = s3lib.create_users(config.user_count) for each_user in user_info: auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() if config.test_ops["create_bucket"]: log.info("no of buckets to create: %s" % config.bucket_count) # create bucket for bc in range(config.bucket_count): bucket_name = utils.gen_bucket_name_from_userid(each_user["user_id"], rand_no=bc) bucket = reusable.create_bucket(bucket_name, rgw_conn, each_user) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_bucket_lifecycle(bucket, rgw_conn, rgw_conn2, life_cycle_rule) if config.test_ops["create_object"]: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size log.info(f"s3 objects to create of size {config.obj_size}") s3_object_name = config.lifecycle_conf[0]["Filter"]["Prefix"] + str(oc) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info(f"s3 object path: {s3_object_path}, name: {s3_object_name}") reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) # poll bucket stats once a minute until lifecycle expires all objects for i in range(1, 100): time.sleep(60) bucket_details = json.loads(utils.exec_shell_cmd(f"radosgw-admin bucket stats --bucket={bucket.name}")) if bucket_details["usage"]["rgw.main"]["num_objects"] == 0: break else: raise TestExecError("Bucket object expiration taking longer than expected") gc_list_output = json.loads(utils.exec_shell_cmd("radosgw-admin gc list --include-all")) if gc_list_output: log.info("Removing shadow objects found") utils.exec_shell_cmd("radosgw-admin gc process --include-all") bucket_id = (bucket_details["id"] + "_" + config.lifecycle_conf[0]["Filter"]["Prefix"]) log.info(f"check for all the entry {bucket_id} for the bucket in data pool") obj_pool = utils.exec_shell_cmd(f"rados ls -p default.rgw.buckets.data | grep {bucket_id}") if obj_pool: # exec_shell_cmd returns the raw output, so iterate over its lines, not its characters for obj in obj_pool.strip().split("\n"): object_name = obj.split("_")[-1] log.info(f"s3 object name to download: {object_name}") object_name_downloaded = object_name + "." + "download" object_download_path = os.path.join(TEST_DATA_PATH, object_name_downloaded) object_downloaded_status = s3lib.resource_op({"obj": bucket, "resource": "download_file", "args": [object_name, object_download_path]}) if object_downloaded_status is False: log.info("As expected, the object is not downloadable") if object_downloaded_status is None: raise TestExecError("Object is expired but still downloadable") if config.local_file_delete: log.info("deleting local file created after the upload") utils.exec_shell_cmd(f"rm -rf {TEST_DATA_PATH}") reusable.delete_bucket(bucket) reusable.remove_user(each_user) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
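# The expiry wait above polls bucket stats once a minute inline. The same idea as a
# small reusable helper, directly mirroring the radosgw-admin call and stats keys used
# in the test above (a sketch; retries/interval defaults are assumptions):
def wait_for_bucket_empty(bucket_name, retries=100, interval=60):
    for _ in range(retries):
        time.sleep(interval)
        stats = json.loads(
            utils.exec_shell_cmd(f"radosgw-admin bucket stats --bucket={bucket_name}")
        )
        # num_objects drops to 0 once lifecycle expiration has removed everything
        if stats["usage"]["rgw.main"]["num_objects"] == 0:
            return True
    return False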