def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() log.info('starting IO') config.user_count = 1 user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() log.info('sharding configuration will be added now.') if config.sharding_type == 'dynamic': log.info('sharding type is dynamic') # for dynamic, # the number of shards should be greater than [ (no of objects)/(max objects per shard) ] # example: objects = 500 ; max object per shard = 10 # then no of shards should be at least 50 or more time.sleep(15) log.info('making changes to ceph.conf') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard)) ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, 'True') num_shards_expected = config.objects_count / config.max_objects_per_shard log.info('num_shards_expected: %s' % num_shards_expected) log.info('trying to restart services ') srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') config.bucket_count = 1 objects_created_list = [] log.info('no of buckets to create: %s' % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1) bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) if config.test_ops.get('enable_version', False): log.info('enable bucket version') reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) log.info('s3 objects to create: %s' % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) if config.test_ops.get('enable_version', False): reusable.upload_version_object(config, user_info, rgw_conn, s3_object_name, config.obj_size, bucket, TEST_DATA_PATH) else: reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) objects_created_list.append((s3_object_name, s3_object_path)) if config.sharding_type == 'manual': log.info('sharding type is manual') # for manual. # the number of shards will be the value set in the command. 
        time.sleep(15)
        log.info('in manual sharding')
        cmd_exec = utils.exec_shell_cmd(
            'radosgw-admin bucket reshard --bucket=%s --num-shards=%s '
            '--yes-i-really-mean-it' % (bucket.name, config.shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    sleep_time = 600
    log.info(f'verification starts after waiting for {sleep_time} seconds')
    time.sleep(sleep_time)
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket.name)
    json_doc = json.loads(op)
    bucket_id = json_doc['data']['bucket']['bucket_id']
    op2 = utils.exec_shell_cmd(
        "radosgw-admin metadata get bucket.instance:%s:%s" % (bucket.name, bucket_id))
    json_doc2 = json.loads(op2)
    num_shards_created = json_doc2['data']['bucket_info']['num_shards']
    log.info('no_of_shards_created: %s' % num_shards_created)
    if config.sharding_type == 'manual':
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info('Expected number of shards created')
    if config.sharding_type == 'dynamic':
        log.info('Verify if resharding list is empty')
        reshard_list_op = json.loads(
            utils.exec_shell_cmd("radosgw-admin reshard list"))
        if not reshard_list_op:
            log.info(
                'for dynamic number of shards created should be greater than or equal to number of expected shards'
            )
            log.info('no_of_shards_expected: %s' % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')
            else:
                raise TestExecError('Expected number of shards not created')
    if config.test_ops.get('delete_bucket_object', False):
        if config.test_ops.get('enable_version', False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn, user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
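
# Illustrative sketch only (not part of the test above): a small helper that reads
# the shard count back the same way the verification step does, reusing the
# radosgw-admin metadata commands already shown; utils.exec_shell_cmd and json are
# the suite helpers imported by these scripts.
def get_num_shards(bucket_name):
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket_name)
    bucket_id = json.loads(op)['data']['bucket']['bucket_id']
    op2 = utils.exec_shell_cmd(
        "radosgw-admin metadata get bucket.instance:%s:%s" % (bucket_name, bucket_id))
    return int(json.loads(op2)['data']['bucket_info']['num_shards'])

# Example use against the bucket created above:
# assert get_num_shards(bucket.name) >= config.objects_count / config.max_objects_per_shard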
import os, sys, glob
import json

sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
import v2.lib.resource_op as s3lib
import v2.utils.log as log
import v2.utils.utils as utils
from v2.utils.utils import HttpResponseParser
from v2.lib.exceptions import TestExecError
import v2.lib.manage_data as manage_data
from v2.lib.s3.write_io_info import IOInfoInitialize, BasicIOInfoStructure, BucketIoInfo, KeyIoInfo

io_info_initialize = IOInfoInitialize()
basic_io_structure = BasicIOInfoStructure()
write_bucket_io_info = BucketIoInfo()
write_key_io_info = KeyIoInfo()


def create_bucket(bucket_name, rgw, user_info):
    log.info('creating bucket with name: %s' % bucket_name)
    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    bucket = s3lib.resource_op({
        'obj': rgw,
        'resource': 'Bucket',
        'args': [bucket_name]
    })
    created = s3lib.resource_op({
        'obj': bucket,
        'resource': 'create',
        'args': None,
        'extra_info': {
            'access_key': user_info['access_key']
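
# Illustrative sketch only: the s3lib.resource_op({'obj': ..., 'resource': ...,
# 'args': [...]}) calls above are assumed to be thin, logged wrappers around the
# plain boto3 resource API, roughly equivalent to the direct calls below.
# Endpoint and credentials are placeholders, not real values.
import boto3

rgw = boto3.resource(
    "s3",
    endpoint_url="http://rgw-node:8080",        # hypothetical RGW endpoint
    aws_access_key_id="ACCESS_KEY",              # placeholder
    aws_secret_access_key="SECRET_KEY",          # placeholder
)
bucket = rgw.Bucket("bucky.1")                   # resource_op(..., 'resource': 'Bucket')
bucket.create()                                  # resource_op(..., 'resource': 'create')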
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() config.rgw_lc_debug_interval = 30 config.rgw_lc_max_worker = 10 log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval)) _, version_name = utils.get_ceph_version() if "nautilus" in version_name: ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_max_worker, str(config.rgw_lc_max_worker)) else: ceph_conf.set_to_ceph_conf( section=None, option=ConfigOpts.rgw_lc_max_worker, value=str(config.rgw_lc_max_worker), ) ceph_conf.set_to_ceph_conf(section=None, option=ConfigOpts.rgw_lc_debug_interval, value="30") log.info("trying to restart services") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") config.user_count = 1 config.bucket_count = 1 # create user user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() log.info("no of buckets to create: %s" % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=1) obj_list = [] obj_tag = "suffix1=WMV1" bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) prefix = list( map( lambda x: x, [ rule["Filter"].get("Prefix") or rule["Filter"]["And"].get("Prefix") for rule in config.lifecycle_conf ], )) prefix = prefix if prefix else ["dummy1"] if config.test_ops["enable_versioning"] is True: reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) if config.test_ops["create_object"] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + "." + bucket.name + "." + str(oc) obj_list.append(s3_object_name) if config.test_ops["version_count"] > 0: for vc in range(config.test_ops["version_count"]): log.info("version count for %s is %s" % (s3_object_name, str(vc))) log.info("modifying data: %s" % s3_object_name) reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, user_info, append_data=True, append_msg="hello object for version: %s\n" % str(vc), ) else: log.info("s3 objects to create: %s" % config.objects_count) reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config) lc_ops.validate_prefix_rule(bucket, config) if config.test_ops["delete_marker"] is True: life_cycle_rule_new = {"Rules": config.delete_marker_ops} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config) if config.test_ops["enable_versioning"] is False: if config.test_ops["create_object"] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + "." + bucket.name + "." 
+ str(oc)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
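
# Illustrative example only: a lifecycle_conf shape consistent with how this test
# reads it (each rule's Filter carries either a top-level Prefix or one nested
# under And, and the And rule matches the "suffix1=WMV1" tag uploaded above).
# The IDs, prefixes and Days values are hypothetical.
lifecycle_conf = [
    {
        "ID": "rule-prefix-only",
        "Filter": {"Prefix": "key1"},
        "Status": "Enabled",
        "Expiration": {"Days": 1},
    },
    {
        "ID": "rule-prefix-and-tag",
        "Filter": {
            "And": {
                "Prefix": "key2",
                "Tags": [{"Key": "suffix1", "Value": "WMV1"}],
            }
        },
        "Status": "Enabled",
        "Expiration": {"Days": 2},
    },
]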
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() write_key_io_info = KeyIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) # create user all_users_info = s3lib.create_users(config.user_count) extra_user = s3lib.create_users(1)[0] extra_user_auth = Auth(extra_user, ssl=config.ssl) extra_user_conn = extra_user_auth.do_auth() for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() s3_object_names = [] # create buckets log.info('no of buckets to create: %s' % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc) log.info('creating bucket with name: %s' % bucket_name_to_create) # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create) bucket = s3lib.resource_op({'obj': rgw_conn, 'resource': 'Bucket', 'args': [bucket_name_to_create]}) # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']}) created = s3lib.resource_op({'obj': bucket, 'resource': 'create', 'args': None, 'extra_info': {'access_key': each_user['access_key']}}) if created is False: raise TestExecError("Resource execution failed: bucket creation faield") if created is not None: response = HttpResponseParser(created) if response.status_code == 200: log.info('bucket created') else: raise TestExecError("bucket creation failed") else: raise TestExecError("bucket creation failed") # getting bucket version object if config.test_ops['enable_version'] is True: log.info('bucket versionig test on bucket: %s' % bucket.name) # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name) bucket_versioning = s3lib.resource_op({'obj': rgw_conn, 'resource': 'BucketVersioning', 'args': [bucket.name]}) # checking the versioning status # version_status = s3_ops.resource_op(bucket_versioning, 'status') version_status = s3lib.resource_op({'obj': bucket_versioning, 'resource': 'status', 'args': None }) if version_status is None: log.info('bucket versioning still not enabled') # enabling bucket versioning # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable') version_enable_status = s3lib.resource_op({'obj': bucket_versioning, 'resource': 'enable', 'args': None, }) response = HttpResponseParser(version_enable_status) if response.status_code == 200: log.info('version enabled') write_bucket_io_info.add_versioning_status(each_user['access_key'],bucket.name, VERSIONING_STATUS['ENABLED']) else: raise TestExecError("version enable failed") if config.objects_count > 0: log.info('s3 objects to create: %s' % config.objects_count) for oc, s3_object_size in list(config.mapped_sizes.items()): # versioning upload s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, str(oc)) s3_object_names.append(s3_object_name) log.info('s3 object name: %s' % s3_object_name) log.info('versioning count: %s' % config.version_count) s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, str(oc)) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) original_data_info = manage_data.io_generator(s3_object_path, s3_object_size) if original_data_info is False: TestExecError("data creation failed") created_versions_count = 0 for vc in range(config.version_count): log.info('version count for %s is %s' % (s3_object_name, str(vc))) log.info('modifying data: %s' % s3_object_name) modified_data_info = 
manage_data.io_generator(s3_object_path, s3_object_size, op='append', **{'message': '\nhello for version: %s\n' % str(vc)}) if modified_data_info is False: TestExecError("data modification failed") log.info('uploading s3 object: %s' % s3_object_path) upload_info = dict({'access_key': each_user['access_key'], 'versioning_status': VERSIONING_STATUS['ENABLED'], 'version_count_no': vc}, **modified_data_info) s3_obj = s3lib.resource_op({'obj': bucket, 'resource': 'Object', 'args': [s3_object_name], 'extra_info': upload_info, }) object_uploaded_status = s3lib.resource_op({'obj': s3_obj, 'resource': 'upload_file', 'args': [modified_data_info['name']], 'extra_info': upload_info}) if object_uploaded_status is False: raise TestExecError("Resource execution failed: object upload failed") if object_uploaded_status is None: log.info('object uploaded') s3_obj = rgw_conn.Object(bucket.name, s3_object_name) log.info('current_version_id: %s' % s3_obj.version_id) key_version_info = basic_io_structure.version_info( **{'version_id': s3_obj.version_id, 'md5_local': upload_info['md5'], 'count_no': vc, 'size': upload_info['size']}) log.info('key_version_info: %s' % key_version_info) write_key_io_info.add_versioning_info(each_user['access_key'], bucket.name, s3_object_path, key_version_info) created_versions_count += 1 log.info('created_versions_count: %s' % created_versions_count) log.info('adding metadata') metadata1 = {"m_data1": "this is the meta1 for this obj"} s3_obj.metadata.update(metadata1) metadata2 = {"m_data2": "this is the meta2 for this obj"} s3_obj.metadata.update(metadata2) log.info('metadata for this object: %s' % s3_obj.metadata) log.info('metadata count for object: %s' % (len(s3_obj.metadata))) if not s3_obj.metadata: raise TestExecError('metadata not created even adding metadata') versions = bucket.object_versions.filter(Prefix=s3_object_name) created_versions_count_from_s3 = len([v.version_id for v in versions]) log.info('created versions count on s3: %s' % created_versions_count_from_s3) if created_versions_count is created_versions_count_from_s3: log.info('no new versions are created when added metdata') else: raise TestExecError("version count missmatch, " "possible creation of version on adding metadata") s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download") object_downloaded_status = s3lib.resource_op({'obj': bucket, 'resource': 'download_file', 'args': [s3_object_name, s3_object_download_path], }) if object_downloaded_status is False: raise TestExecError("Resource execution failed: object download failed") if object_downloaded_status is None: log.info('object downloaded') # checking md5 of the downloaded file s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path) log.info('downloaded_md5: %s' % s3_object_downloaded_md5) log.info('uploaded_md5: %s' % modified_data_info['md5']) # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path) log.info('all versions for the object: %s\n' % s3_object_name) versions = bucket.object_versions.filter(Prefix=s3_object_name) for version in versions: log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id)) if config.test_ops.get('set_acl', None) is True: s3_obj_acl = s3lib.resource_op({'obj': rgw_conn, 'resource': 'ObjectAcl', 'args': [bucket.name, s3_object_name]}) # setting acl to private, just need to set to any acl and # check if its set - check by response code acls_set_status = s3_obj_acl.put(ACL='private') response = HttpResponseParser(acls_set_status) if 
response.status_code == 200: log.info('ACLs set') else: raise TestExecError("Acls not Set") # get obj details based on version id for version in versions: log.info('getting info for version id: %s' % version.version_id) obj = s3lib.resource_op({'obj': rgw_conn, 'resource': 'Object', 'args': [bucket.name, s3_object_name]}) log.info('obj get detils :%s\n' % (obj.get(VersionId=version.version_id))) if config.test_ops['copy_to_version'] is True: # reverting object to one of the versions ( randomly chosen ) version_id_to_copy = random.choice([v.version_id for v in versions]) log.info('version_id_to_copy: %s' % version_id_to_copy) s3_obj = rgw_conn.Object(bucket.name, s3_object_name) log.info('current version_id: %s' % s3_obj.version_id) copy_response = s3_obj.copy_from(CopySource={'Bucket': bucket.name, 'Key': s3_object_name, 'VersionId': version_id_to_copy}) log.info('copy_response: %s' % copy_response) if copy_response is None: raise TestExecError("copy object from version id failed") # current_version_id = copy_response['VersionID'] log.info('current_version_id: %s' % s3_obj.version_id) # delete the version_id_to_copy object s3_obj.delete(VersionId=version_id_to_copy) log.info('all versions for the object after the copy operation: %s\n' % s3_object_name) for version in versions: log.info( 'key_name: %s --> version_id: %s' % (version.object_key, version.version_id)) # log.info('downloading current s3object: %s' % s3_object_name) # s3_obj.download_file(s3_object_name + ".download") if config.test_ops['delete_object_versions'] is True: log.info('deleting s3_obj keys and its versions') s3_obj = s3lib.resource_op({'obj': rgw_conn, 'resource': 'Object', 'args': [bucket.name, s3_object_name]}) log.info('deleting versions for s3 obj: %s' % s3_object_name) for version in versions: log.info('trying to delete obj version: %s' % version.version_id) del_obj_version = s3lib.resource_op({'obj': s3_obj, 'resource': 'delete', 'kwargs': dict(VersionId=version.version_id)}) log.info('response:\n%s' % del_obj_version) if del_obj_version is not None: response = HttpResponseParser(del_obj_version) if response.status_code == 204: log.info('version deleted ') write_key_io_info.delete_version_info(each_user['access_key'], bucket.name, s3_object_path, version.version_id) else: raise TestExecError("version deletion failed") else: raise TestExecError("version deletion failed") log.info('available versions for the object') versions = bucket.object_versions.filter(Prefix=s3_object_name) for version in versions: log.info('key_name: %s --> version_id: %s' % ( version.object_key, version.version_id)) if config.test_ops.get('delete_from_extra_user') is True: log.info('trying to delete objects from extra user') s3_obj = s3lib.resource_op({'obj': extra_user_conn, 'resource': 'Object', 'args': [bucket.name, s3_object_name]}) log.info('deleting versions for s3 obj: %s' % s3_object_name) for version in versions: log.info('trying to delete obj version: %s' % version.version_id) del_obj_version = s3lib.resource_op({'obj': s3_obj, 'resource': 'delete', 'kwargs': dict( VersionId=version.version_id)}) log.info('response:\n%s' % del_obj_version) if del_obj_version is not False: response = HttpResponseParser(del_obj_version) if response.status_code == 204: log.info('version deleted ') write_key_io_info.delete_version_info(each_user['access_key'], bucket.name, s3_object_path, version.version_id) raise TestExecError("version and deleted, this should not happen") else: log.info('version did not delete, expected behaviour') else: 
log.info('version did not delete, expected behaviour') if config.local_file_delete is True: log.info('deleting local file') utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path) if config.test_ops['suspend_version'] is True: log.info('suspending versioning') # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend') suspend_version_status = s3lib.resource_op({'obj': bucket_versioning, 'resource': 'suspend', 'args': None}) response = HttpResponseParser(suspend_version_status) if response.status_code == 200: log.info('versioning suspended') write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name, VERSIONING_STATUS['SUSPENDED']) else: raise TestExecError("version suspend failed") # getting all objects in the bucket log.info('getting all objects in the bucket') objects = s3lib.resource_op({'obj': bucket, 'resource': 'objects', 'args': None}) log.info('objects :%s' % objects) all_objects = s3lib.resource_op({'obj': objects, 'resource': 'all', 'args': None}) log.info('all objects: %s' % all_objects) log.info('all objects2 :%s ' % bucket.objects.all()) for obj in all_objects: log.info('object_name: %s' % obj.key) versions = bucket.object_versions.filter(Prefix=obj.key) log.info('displaying all versions of the object') for version in versions: log.info( 'key_name: %s --> version_id: %s' % (version.object_key, version.version_id)) if config.test_ops.get('suspend_from_extra_user') is True: log.info('suspending versioning from extra user') # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend') bucket_versioning = s3lib.resource_op({'obj': extra_user_conn, 'resource': 'BucketVersioning', 'args': [bucket.name]}) suspend_version_status = s3lib.resource_op({'obj': bucket_versioning, 'resource': 'suspend', 'args': None}) if suspend_version_status is not False: response = HttpResponseParser(suspend_version_status) if response.status_code == 200: log.info('versioning suspended') write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name, VERSIONING_STATUS['SUSPENDED']) raise TestExecError('version suspended, this should not happen') else: log.info('versioning not suspended, expected behaviour') if config.test_ops.get('upload_after_suspend') is True: log.info('trying to upload after suspending versioning on bucket') for oc, s3_object_size in list(config.mapped_sizes.items()): # non versioning upload s3_object_name = s3_object_names[oc] + ".after_version_suspending" log.info('s3 object name: %s' % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) non_version_data_info = manage_data.io_generator(s3_object_path, s3_object_size, op="append", **{ 'message': '\nhello for non version\n'}) if non_version_data_info is False: TestExecError("data creation failed") log.info('uploading s3 object: %s' % s3_object_path) upload_info = dict({'access_key': each_user['access_key'], 'versioning_status': 'suspended'},**non_version_data_info) s3_obj = s3lib.resource_op({'obj': bucket, 'resource': 'Object', 'args': [s3_object_name], 'extra_info': upload_info}) object_uploaded_status = s3lib.resource_op({'obj': s3_obj, 'resource': 'upload_file', 'args': [non_version_data_info['name']], 'extra_info': upload_info}) if object_uploaded_status is False: raise TestExecError("Resource execution failed: object upload failed") if object_uploaded_status is None: log.info('object uploaded') s3_obj = s3lib.resource_op({'obj': rgw_conn, 'resource': 'Object', 'args': [bucket.name, s3_object_name]}) log.info('version_id: %s' % 
s3_obj.version_id) if s3_obj.version_id is None: log.info('Versions are not created after suspending') else: raise TestExecError('Versions are created even after suspending') s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download") object_downloaded_status = s3lib.resource_op({'obj': bucket, 'resource': 'download_file', 'args': [s3_object_name, s3_object_download_path], }) if object_downloaded_status is False: raise TestExecError("Resource execution failed: object download failed") if object_downloaded_status is None: log.info('object downloaded') # checking md5 of the downloaded file s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path) log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5) log.info('s3_object_uploaded_md5: %s' % non_version_data_info['md5']) if config.local_file_delete is True: utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path)
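
# Illustrative sketch only: the enable/suspend cycle exercised above, expressed
# with the plain boto3 client API for reference (the test drives it through
# s3lib.resource_op). s3_client is assumed to come from the suite's
# auth.do_auth_using_client() helper used by the other tests here.
s3_client.put_bucket_versioning(
    Bucket=bucket.name,
    VersioningConfiguration={"Status": "Enabled"},
)
s3_client.put_bucket_versioning(
    Bucket=bucket.name,
    VersioningConfiguration={"Status": "Suspended"},
)
log.info(s3_client.get_bucket_versioning(Bucket=bucket.name).get("Status"))
# Objects uploaded after suspension carry no version id, which is why the test
# checks that s3_obj.version_id is None.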
def test_exec(config): test_info = AddTestInfo('test versioning with objects') io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) write_bucket_io_info = BucketIoInfo() write_key_io_info = KeyIoInfo() try: test_info.started_info() version_count = 3 # create user s3_user = s3lib.create_users(1)[0] # authenticate auth = Auth(s3_user, ssl=config.ssl) rgw_conn = auth.do_auth() b1_name = 'bucky.1e' # bucket 1 b1_k1_name = b1_name + ".key.1" # key1 b1_k2_name = b1_name + ".key.2" # key2 b2_name = 'bucky.2e' # bucket 2 b2_k1_name = b2_name + ".key.1" # key1 b2_k2_name = b2_name + ".key.2" # key2 b1 = resuables.create_bucket(b1_name, rgw_conn, s3_user) b2 = resuables.create_bucket(b2_name, rgw_conn, s3_user) # enable versioning on b1 resuables.enable_versioning(b1, rgw_conn, s3_user, write_bucket_io_info) # upload object to version enabled bucket b1 obj_sizes = list(config.mapped_sizes.values()) config.obj_size = obj_sizes[0] for vc in range(version_count): resuables.upload_object(b1_k1_name, b1, TEST_DATA_PATH, config, s3_user, append_data=True, append_msg='hello vc count: %s' % str(vc)) # upload object to non version bucket b2 config.obj_size = obj_sizes[1] resuables.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config, s3_user) # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present b1_k2 = s3lib.resource_op({ 'obj': rgw_conn, 'resource': 'Object', 'args': [b1.name, b1_k2_name] }) b2_k2 = s3lib.resource_op({ 'obj': rgw_conn, 'resource': 'Object', 'args': [b2.name, b2_k2_name] }) log.info( 'copy from b2_k1 key to b1_k2 key to bucket 1 -> version enabled bucket' ) copy_response = b1_k2.copy_from(CopySource={ 'Bucket': b2.name, 'Key': b2_k1_name, }) log.info('copy_response: %s' % copy_response) if copy_response is None: raise TestExecError("copy object failed") log.info('checking if copies object has version id created') b1_k2_version_id = b1_k2.version_id log.info('version id: %s' % b1_k2_version_id) if b1_k2_version_id is None: raise TestExecError( 'Version ID not created for the copied object on to the versioned enabled bucket' ) else: log.info( 'Version ID created for the copied object on to the versioned bucket' ) all_objects_in_b1 = b1.objects.all() log.info('all objects in bucket 1') for obj in all_objects_in_b1: log.info('object_name: %s' % obj.key) versions = b1.object_versions.filter(Prefix=obj.key) log.info('displaying all versions of the object') for version in versions: log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id)) log.info('-------------------------------------------') log.info( 'copy from b1_k1 key to b2_k2 to bucket 2 -> non version bucket') copy_response = b2_k2.copy_from(CopySource={ 'Bucket': b1.name, 'Key': b1_k1_name, }) log.info('copy_response: %s' % copy_response) if copy_response is None: raise TestExecError("copy object failed") log.info('checking if copies object has version id created') b2_k2_version_id = b2_k2.version_id log.info('version id: %s' % b2_k2_version_id) if b2_k2_version_id is None: log.info( 'Version ID not created for the copied object on to the non versioned bucket' ) else: raise TestExecError( 'Version ID created for the copied object on to the non versioned bucket' ) all_objects_in_b2 = b2.objects.all() log.info('all objects in bucket 2') for obj in all_objects_in_b2: 
            log.info('object_name: %s' % obj.key)
            versions = b2.object_versions.filter(Prefix=obj.key)
            log.info('displaying all versions of the object')
            for version in versions:
                log.info('key_name: %s --> version_id: %s'
                         % (version.object_key, version.version_id))
        test_info.success_status('test passed')
        sys.exit(0)
    # handle the more specific TestExecError before the generic Exception so
    # that its handler is reachable
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
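
# Illustrative sketch only: the same expectation expressed at the client level.
# copy_object into a version-enabled destination bucket returns a VersionId in
# its response; into a non-versioned destination it does not. s3_client is
# assumed to come from the suite's do_auth_using_client() helper.
resp = s3_client.copy_object(
    Bucket=b1.name, Key=b1_k2_name,
    CopySource={"Bucket": b2.name, "Key": b2_k1_name})
assert resp.get("VersionId") is not None      # versioned destination

resp = s3_client.copy_object(
    Bucket=b2.name, Key=b2_k2_name,
    CopySource={"Bucket": b1.name, "Key": b1_k1_name})
assert resp.get("VersionId") is None          # non-versioned destination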
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() # authenticate sns client. rgw_sns_conn = auth.do_auth_sns_client() # authenticate with s3 client rgw_s3_client = auth.do_auth_using_client() # get ceph version ceph_version_id, ceph_version_name = utils.get_ceph_version() objects_created_list = [] if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info) # create topic with endpoint if config.test_ops["create_topic"] is True: endpoint = config.test_ops.get("endpoint") ack_type = config.test_ops.get("ack_type") topic_id = str(uuid.uuid4().hex[:16]) persistent = False topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id log.info( f"creating a topic with {endpoint} endpoint with ack type {ack_type}" ) if config.test_ops.get("persistent_flag", False): log.info("topic with peristent flag enabled") persistent = config.test_ops.get("persistent_flag") topic = notification.create_topic(rgw_sns_conn, endpoint, ack_type, topic_name, persistent) # get topic attributes if config.test_ops.get("get_topic_info", False): log.info("get topic attributes") get_topic_info = notification.get_topic( rgw_sns_conn, topic, ceph_version_name) # put bucket notification with topic configured for event if config.test_ops["put_get_bucket_notification"] is True: event = config.test_ops.get("event_type") notification_name = "notification-" + str(event) notification.put_bucket_notification( rgw_s3_client, bucket_name_to_create, notification_name, topic, event, ) # get bucket notification log.info( f"get bucket notification for bucket : {bucket_name_to_create}" ) notification.get_bucket_notification( rgw_s3_client, bucket_name_to_create) # create objects if config.test_ops["create_object"] is True: # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) # copy objects if config.test_ops.get("copy_object", False): log.info("copy object") status = rgw_s3_client.copy_object( Bucket=bucket_name_to_create, Key="copy_of_object" + s3_object_name, CopySource={ "Bucket": bucket_name_to_create, "Key": s3_object_name, }, ) if status is None: raise TestExecError("copy object failed") # delete objects if 
config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, each_user) else: reusable.delete_objects(bucket) # start kafka broker and consumer event_record_path = "/home/cephuser/event_record" start_consumer = notification.start_kafka_broker_consumer( topic_name, event_record_path) if start_consumer is False: raise TestExecError("Kafka consumer not running") # verify all the attributes of the event record. if event not received abort testcase log.info("verify event record attributes") verify = notification.verify_event_record(event, bucket_name_to_create, event_record_path, ceph_version_name) if verify is False: raise EventRecordDataError( "Event record is empty! notification is not seen") # delete topic logs on kafka broker notification.del_topic_from_kafka_broker(topic_name) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get("encryption_algorithm", None) is not None: log.info("encryption enabled, making ceph config changes") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) else: rgw_conn = auth.do_auth() objects_created_list = [] if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc) log.info("creating bucket with name: %s" % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info) if config.test_ops["create_object"] is True: if config.test_ops["object_structure"] == "flat": # uploading data log.info("top level s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join( TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get( "upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) objects_created_list.append( (s3_object_name, s3_object_path)) # deleting the local file created after upload if config.local_file_delete is True: log.info( "deleting local file created after the upload" ) utils.exec_shell_cmd("rm -rf %s" % s3_object_path) # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72 if config.test_ops["object_structure"] == "pseudo": log.info( f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each" ) for count in range(config.pseudo_dir_count): s3_pseudo_dir_name = utils.gen_s3_object_name( bucket_name_to_create, count) s3_object_path = os.path.join( TEST_DATA_PATH, s3_pseudo_dir_name) manage_data.pseudo_dir_generator(s3_object_path) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_pseudo_object_name( s3_pseudo_dir_name, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join( TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get( "upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, 
TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) # deleting the local file created after upload if config.local_file_delete is True: log.info( "deleting local file created after the upload" ) utils.exec_shell_cmd("rm -rf %s" % s3_object_path) # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0 if config.test_ops["create_object"] is False: if config.test_ops[ "object_structure"] == "pseudo-dir-only": log.info( f"pseudo directories to create {config.pseudo_dir_count}" ) for count in range(config.pseudo_dir_count): s3_pseudo_dir_name = utils.gen_s3_object_name( bucket_name_to_create, count) utils.create_psuedo_dir(s3_pseudo_dir_name, bucket) # radoslist listing of the bucket if config.test_ops["radoslist"] is True: log.info( "executing the command radosgw-admin bucket radoslist " ) radoslist = utils.exec_shell_cmd( "radosgw-admin bucket radoslist --bucket %s" % bucket_name_to_create) if radoslist is False: raise TestExecError( "Radoslist command execution failed") # get the configuration parameter - rgw_bucket_index_max_aio ceph_version_id, ceph_version_name = utils.get_ceph_version() if ceph_version_name in ["luminous", "nautilus"]: cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep rgw_bucket_index_max_aio" max_aio_output = utils.exec_shell_cmd(cmd) max_aio = max_aio_output.split()[1] else: cmd = "ceph config get mon rgw_bucket_index_max_aio" max_aio_output = utils.exec_shell_cmd(cmd) max_aio = max_aio_output.rstrip("\n") # bucket stats to get the num_objects of the bucket bucket_stats = utils.exec_shell_cmd( "radosgw-admin bucket stats --bucket %s" % bucket_name_to_create) bucket_stats_json = json.loads(bucket_stats) bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][ "num_objects"] # ordered listing via radosgw-admin command and noting time taken log.info( "measure the execution time taken to list via radosgw-admin command" ) if config.test_ops["radosgw_listing_ordered"] is True: log.info("ordered listing via radosgw-admin command") rgw_cmd_time = reusable.time_to_list_via_radosgw( bucket_name_to_create, "ordered") if rgw_cmd_time > 0: rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time) rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60) log.info( f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins" ) else: raise TestExecError( "object listing via radosgw-admin command failed") # unordered listing via radosgw-admin command and noting time taken if config.test_ops["radosgw_listing_ordered"] is False: log.info("unordered listing via radosgw-admin command") rgw_time = reusable.time_to_list_via_radosgw( bucket_name_to_create, "unordered") if rgw_time > 0: rgw_time_secs = "{:.4f}".format(rgw_time) rgw_time_mins = "{:.4f}".format(rgw_time / 60) log.info( f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins" ) else: raise TestExecError( "object listing via radosgw-admin command failed") # listing via boto and noting the time taken log.info("measure the execution time taken to list via boto") boto_time = reusable.time_to_list_via_boto( bucket_name_to_create, rgw_conn) if boto_time > 0: boto_time_secs = "{:.4f}".format(boto_time) 
boto_time_mins = "{:.4f}".format(boto_time / 60) log.info( f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins" ) else: raise TestExecError("object listing via boto failed") # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265 if config.radoslist_all is True: log.info( "Executing the command radosgw-admin bucket radoslist on all buckets" ) cmd = "radosgw-admin bucket radoslist | grep ERROR" radoslist_all_error = utils.exec_shell_cmd(cmd) if radoslist_all_error: raise TestExecError("ERROR in radoslist command") if config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, each_user) else: reusable.delete_objects(bucket) time.sleep(30) reusable.delete_bucket(bucket) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!") if config.user_remove is True: reusable.remove_user(each_user)
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # check the default data log backing default_data_log = reusable.get_default_datalog_type() log.info(f"{default_data_log} is the default data log backing") # check sync status if a multisite cluster reusable.check_sync_status() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() objects_created_list = [] # change the default datalog backing to FIFO if config.test_ops.get("change_datalog_backing", False): logtype = config.test_ops["change_datalog_backing"] log.info(f"change default datalog backing to {logtype}") cmd = f"radosgw-admin datalog type --log-type={logtype}" change_datalog_type = utils.exec_shell_cmd(cmd) if change_datalog_type is False: raise TestExecError("Failed to change the datalog type to fifo") log.info( "restart the rgw daemons and sleep of 30secs for rgw daemon to be up " ) srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc ) log.info("creating bucket with name: %s" % bucket_name_to_create) bucket = reusable.create_bucket( bucket_name_to_create, rgw_conn, each_user ) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning( bucket, rgw_conn, each_user, write_bucket_io_info ) if config.test_ops["create_object"] is True: # uploading data log.info( "top level s3 objects to create: %s" % config.objects_count ) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc ) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) objects_created_list.append((s3_object_name, s3_object_path)) # deleting the local file created after upload if config.local_file_delete is True: log.info("deleting local file created after the upload") utils.exec_shell_cmd("rm -rf %s" % s3_object_path) # delete object and bucket if config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object( bucket, name, path, rgw_conn, each_user ) else: reusable.delete_objects(bucket) time.sleep(30) reusable.delete_bucket(bucket) # check for any ERRORs in datalog list. ref- https://bugzilla.redhat.com/show_bug.cgi?id=1917687 error_in_data_log_list = reusable.check_datalog_list() if error_in_data_log_list: raise TestExecError("Error in datalog list") # check for data log markers. 
    # ref: https://bugzilla.redhat.com/show_bug.cgi?id=1831798#c22
    data_log_marker = reusable.check_datalog_marker()
    log.info(f"The data_log_marker is: {data_log_marker}")
    # check sync status if a multisite cluster
    reusable.check_sync_status()
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
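
# Illustrative sketch only (assumption) of the checks wrapped by
# reusable.check_datalog_list() and reusable.check_datalog_marker() above,
# using the radosgw-admin datalog commands directly.
datalog_list = utils.exec_shell_cmd("radosgw-admin datalog list")
if datalog_list is False or "ERROR" in str(datalog_list):
    raise TestExecError("Error in datalog list")
datalog_status = utils.exec_shell_cmd("radosgw-admin datalog status")
log.info("datalog status (markers): %s" % datalog_status)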
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get("encryption_algorithm", None) is not None: log.info("encryption enabled, making ceph config changes") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) # ,config=config) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) else: rgw_conn = auth.do_auth() user_id = each_user["user_id"] # Creating a seed for multi-factor authentication log.info("Creating a seed for multi-factor authentication") cmd = "head -10 /dev/urandom | sha512sum | cut -b 1-30" SEED = utils.exec_shell_cmd(cmd) log.info( "Configure the one-time password generator oathtool and the back-end MFA system to use the same seed." ) test_totp = reusable.generate_totp(SEED) serial = "MFAtest" + str(random.randrange(1, 100)) if test_totp is False: raise TestExecError( "Failed to configure one-time password generator - oathtool") if config.test_ops["mfa_create"] is True: log.info("Create a new MFA TOTP token") cmd = f"time radosgw-admin mfa create --uid={user_id} --totp-serial={serial} --totp-seed={SEED}" mfa_create = utils.exec_shell_cmd(cmd) if mfa_create is False: raise TestExecError("Failed to create new MFA TOTP token!") # Verify no crash is seen with in correct syntax for mfa resync command BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1947862 if config.test_ops.get("mfa_resync_invalid_syntax") is True: log.info( "Validate the mfa resync command errors out with approriate message on invalid syntax" ) get_totp = reusable.generate_totp(SEED) cmd = f"radosgw-admin mfa resync --uid {user_id} --totp-serial={serial} --totp-seed={SEED} --totp-pin={get_totp}" mfa_resync_invalid_syntax = utils.exec_shell_cmd(cmd) if mfa_resync_invalid_syntax is False: log.info("appropriate usage message displayed") else: raise TestExecError("Usage message not displayed") if config.test_ops["mfa_check"] is True: log.info( "Test a multi-factor authentication (MFA) time-based one time password (TOTP) token." ) get_totp = reusable.generate_totp(SEED) cmd = ( "time radosgw-admin mfa check --uid=%s --totp-serial=%s --totp-pin=%s" % (each_user["user_id"], serial, get_totp)) cmd = f"time radosgw-admin mfa check --uid={user_id} --totp-serial={serial} --totp-pin={get_totp}" mfa_check = utils.exec_shell_cmd(cmd) if mfa_check is False: log.info( "Resynchronize a multi-factor authentication TOTP token in case of time skew or failed checks." 
                )
                previous_pin = reusable.generate_totp(SEED)
                log.info("Sleep of 30 seconds to fetch another totp")
                time.sleep(30)
                current_pin = reusable.generate_totp(SEED)
                # mfa resync expects two consecutive TOTP pins: previous, then current
                cmd = f"time radosgw-admin mfa resync --uid {user_id} --totp-serial {serial} --totp-pin {previous_pin} --totp-pin {current_pin}"
                mfa_resync = utils.exec_shell_cmd(cmd)
                if mfa_resync is False:
                    raise TestExecError("Failed to resync token")
                log.info(
                    "Verify the token was successfully resynchronized by testing a new PIN"
                )
                get_totp = reusable.generate_totp(SEED)
                cmd = f"time radosgw-admin mfa check --uid {user_id} --totp-serial {serial} --totp-pin {get_totp}"
                mfa_check_resync = utils.exec_shell_cmd(cmd)
                if "ok" not in mfa_check_resync:
                    raise TestExecError("Failed to verify resync token")

        if config.test_ops["mfa_list"] is True:
            log.info("List MFA TOTP tokens")
            cmd = f"radosgw-admin mfa list --uid {user_id}"
            mfa_list = utils.exec_shell_cmd(cmd)
            if "MFAtest" in mfa_list:
                log.info("MFA token is listed for the given user")

        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn,
                                                each_user)
                if config.test_ops.get("enable_mfa_version", False):
                    log.info("enable bucket versioning and MFA deletes")
                    token, status = reusable.enable_mfa_versioning(
                        bucket, rgw_conn, SEED, serial, each_user,
                        write_bucket_io_info)
                    if status is False:
                        log.info(
                            "trying again! AccessDenied could be a timing issue!"
                        )
                        new_token = reusable.generate_totp(SEED)
                        if token == new_token:
                            log.info(
                                "sleep of 30secs to generate another TOTP token"
                            )
                            time.sleep(30)
                            status = reusable.enable_mfa_versioning(
                                bucket,
                                rgw_conn,
                                SEED,
                                serial,
                                each_user,
                                write_bucket_io_info,
                            )
                            if status is False:
                                raise MFAVersionError(
                                    "Failed to enable MFA and versioning on the bucket!"
) if config.test_ops["create_object"] is True: # uploading data log.info( f"top level s3 objects to create: {config.objects_count}" ) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info(f"s3 object name: {s3_object_name}") s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info(f"s3 object path: {s3_object_path}") if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) objects_created_list.append( (s3_object_name, s3_object_path)) # deleting the local file created after upload if config.local_file_delete is True: log.info( "deleting local file created after the upload") cmd = f"rm -rf {s3_object_path}" utils.exec_shell_cmd(cmd) # bucket list to check the objects cmd = f"radosgw-admin bucket list --bucket {bucket_name_to_create}" bucket_list = utils.exec_shell_cmd(cmd) if config.test_ops["delete_mfa_object"] is True: for s3_object_name, path in objects_created_list: log.info( "Deleting an object configured with MFA should have TOTP token" ) versions = bucket.object_versions.filter( Prefix=s3_object_name) for version in versions: log.info( f"key_name: {version.object_key} --> version_id: {version.version_id}" ) log.info("Deleting the object with TOTP token") get_totp = reusable.generate_totp(SEED) cmd = f"radosgw-admin object rm --object {version.object_key} --object-version {version.version_id} --totp-pin {get_totp} --bucket {bucket_name_to_create}" object_delete = utils.exec_shell_cmd(cmd) if object_delete is False: raise TestExecError( "Object deletion with MFA token failed!") log.info("Verify object is deleted permanently") cmd = f"radosgw-admin bucket list --bucket {bucket_name_to_create}" bucket_list = utils.exec_shell_cmd(cmd) if version.version_id in bucket_list: raise TestExecError( "Object version is still present, Failed to delete the object" ) if config.test_ops["delete_bucket"] is True: log.info(f"Deleting the bucket {bucket_name_to_create}") time.sleep(10) reusable.delete_bucket(bucket) if config.test_ops["remove_mfa"] is True: log.info( "Delete a multi-factor authentication (MFA) time-based one time password (TOTP) token" ) cmd = f"radosgw-admin mfa remove --uid {user_id} --totp-serial {serial}" mfa_remove = utils.exec_shell_cmd(cmd) if mfa_remove is False: raise TestExecError("MFA delete failed") log.info("Verify the MFA token is deleted") cmd = ( f"radosgw-admin mfa get --uid {user_id} --totp-serial {serial}" ) mfa_get = utils.exec_shell_cmd(cmd) if mfa_get is False: log.info("MFA token successfully deleted for user") else: raise TestExecError("MFA token delete for user failed") # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create users; one extra user is created to verify cross-user delete/suspend behaviour
    all_users_info = s3lib.create_users(config.user_count)
    extra_user = s3lib.create_users(1)[0]
    extra_user_auth = Auth(extra_user, ssl=config.ssl)
    extra_user_conn = extra_user_auth.do_auth()
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        s3_object_names = []
        # create buckets
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                each_user["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = s3lib.resource_op({
                "obj": rgw_conn,
                "resource": "Bucket",
                "args": [bucket_name_to_create],
            })
            # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
            created = s3lib.resource_op({
                "obj": bucket,
                "resource": "create",
                "args": None,
                "extra_info": {"access_key": each_user["access_key"]},
            })
            if created is False:
                raise TestExecError("Resource execution failed: bucket creation failed")
            if created is not None:
                response = HttpResponseParser(created)
                if response.status_code == 200:
                    log.info("bucket created")
                else:
                    raise TestExecError("bucket creation failed")
            else:
                raise TestExecError("bucket creation failed")
            # getting bucket version object
            if config.test_ops["enable_version"] is True:
                log.info("bucket versioning test on bucket: %s" % bucket.name)
                # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                bucket_versioning = s3lib.resource_op({
                    "obj": rgw_conn,
                    "resource": "BucketVersioning",
                    "args": [bucket.name],
                })
                # checking the versioning status
                # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                version_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "status",
                    "args": None,
                })
                if version_status is None:
                    log.info("bucket versioning still not enabled")
                # enabling bucket versioning
                # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                version_enable_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "enable",
                    "args": None,
                })
                response = HttpResponseParser(version_enable_status)
                if response.status_code == 200:
                    log.info("version enabled")
                    write_bucket_io_info.add_versioning_status(
                        each_user["access_key"], bucket.name,
                        VERSIONING_STATUS["ENABLED"])
                else:
                    raise TestExecError("version enable failed")
                if config.objects_count > 0:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, s3_object_size in list(config.mapped_sizes.items()):
                        # versioning upload
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, str(oc))
                        s3_object_names.append(s3_object_name)
                        log.info("s3 object name: %s" % s3_object_name)
                        log.info("versioning count: %s" % config.version_count)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        original_data_info = manage_data.io_generator(
                            s3_object_path, s3_object_size)
                        if original_data_info is False:
                            raise TestExecError("data creation failed")
                        created_versions_count = 0
                        for vc in range(config.version_count):
                            log.info("version count for %s is %s"
                                     % (s3_object_name, str(vc)))
                            log.info("modifying data: %s" % s3_object_name)
                            modified_data_info = manage_data.io_generator(
                                s3_object_path,
                                s3_object_size,
                                op="append",
                                **{"message": "\nhello for version: %s\n" % str(vc)})
                            if modified_data_info is False:
                                raise TestExecError("data modification failed")
                            log.info("uploading s3 object: %s" % s3_object_path)
                            upload_info = dict(
                                {
                                    "access_key": each_user["access_key"],
                                    "versioning_status": VERSIONING_STATUS["ENABLED"],
                                    "version_count_no": vc,
                                }, **modified_data_info)
                            s3_obj = s3lib.resource_op({
                                "obj": bucket,
                                "resource": "Object",
                                "args": [s3_object_name],
                                "extra_info": upload_info,
                            })
                            object_uploaded_status = s3lib.resource_op({
                                "obj": s3_obj,
                                "resource": "upload_file",
                                "args": [modified_data_info["name"]],
                                "extra_info": upload_info,
                            })
                            if object_uploaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object upload failed")
                            if object_uploaded_status is None:
                                log.info("object uploaded")
                                s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                                log.info("current_version_id: %s" % s3_obj.version_id)
                                key_version_info = basic_io_structure.version_info(
                                    **{
                                        "version_id": s3_obj.version_id,
                                        "md5_local": upload_info["md5"],
                                        "count_no": vc,
                                        "size": upload_info["size"],
                                    })
                                log.info("key_version_info: %s" % key_version_info)
                                write_key_io_info.add_versioning_info(
                                    each_user["access_key"], bucket.name,
                                    s3_object_path, key_version_info)
                                created_versions_count += 1
                                log.info("created_versions_count: %s"
                                         % created_versions_count)
                                log.info("adding metadata")
                                metadata1 = {"m_data1": "this is the meta1 for this obj"}
                                s3_obj.metadata.update(metadata1)
                                metadata2 = {"m_data2": "this is the meta2 for this obj"}
                                s3_obj.metadata.update(metadata2)
                                log.info("metadata for this object: %s" % s3_obj.metadata)
                                log.info("metadata count for object: %s"
                                         % (len(s3_obj.metadata)))
                                if not s3_obj.metadata:
                                    raise TestExecError(
                                        "metadata not created even after adding metadata")
                                versions = bucket.object_versions.filter(
                                    Prefix=s3_object_name)
                                created_versions_count_from_s3 = len(
                                    [v.version_id for v in versions])
                                log.info("created versions count on s3: %s"
                                         % created_versions_count_from_s3)
                                if created_versions_count == created_versions_count_from_s3:
                                    log.info("no new versions are created when metadata is added")
                                else:
                                    raise TestExecError(
                                        "version count mismatch, "
                                        "possible creation of version on adding metadata")
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name + ".download")
                            object_downloaded_status = s3lib.resource_op({
                                "obj": bucket,
                                "resource": "download_file",
                                "args": [s3_object_name, s3_object_download_path],
                            })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            # checking md5 of the downloaded file
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            log.info("downloaded_md5: %s" % s3_object_downloaded_md5)
                            log.info("uploaded_md5: %s" % modified_data_info["md5"])
                            # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path)
                        log.info("all versions for the object: %s\n" % s3_object_name)
                        versions = bucket.object_versions.filter(Prefix=s3_object_name)
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s"
                                     % (version.object_key, version.version_id))
                        if config.test_ops.get("set_acl", None) is True:
                            s3_obj_acl = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "ObjectAcl",
                                "args": [bucket.name, s3_object_name],
                            })
                            # setting acl to private, just need to set to any acl and
                            # check if its set - check by response code
                            acls_set_status = s3_obj_acl.put(ACL="private")
                            response = HttpResponseParser(acls_set_status)
                            if response.status_code == 200:
                                log.info("ACLs set")
                            else:
                                raise TestExecError("Acls not Set")
                        # get obj details based on version id
                        for version in versions:
                            log.info("getting info for version id: %s" % version.version_id)
                            obj = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("obj get details: %s\n"
                                     % (obj.get(VersionId=version.version_id)))
                        if config.test_ops["copy_to_version"] is True:
                            # reverting object to one of the versions (randomly chosen)
                            version_id_to_copy = random.choice(
                                [v.version_id for v in versions])
                            log.info("version_id_to_copy: %s" % version_id_to_copy)
                            s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                            log.info("current version_id: %s" % s3_obj.version_id)
                            copy_response = s3_obj.copy_from(CopySource={
                                "Bucket": bucket.name,
                                "Key": s3_object_name,
                                "VersionId": version_id_to_copy,
                            })
                            log.info("copy_response: %s" % copy_response)
                            if copy_response is None:
                                raise TestExecError("copy object from version id failed")
                            # current_version_id = copy_response['VersionID']
                            log.info("current_version_id: %s" % s3_obj.version_id)
                            # delete the version_id_to_copy object
                            s3_obj.delete(VersionId=version_id_to_copy)
                            log.info("all versions for the object after the copy operation: %s\n"
                                     % s3_object_name)
                            for version in versions:
                                log.info("key_name: %s --> version_id: %s"
                                         % (version.object_key, version.version_id))
                            # log.info('downloading current s3object: %s' % s3_object_name)
                            # s3_obj.download_file(s3_object_name + ".download")
                        if config.test_ops["delete_object_versions"] is True:
                            log.info("deleting s3_obj keys and its versions")
                            s3_obj = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" % s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s"
                                         % version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj": s3_obj,
                                    "resource": "delete",
                                    "kwargs": dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted")
                                        reusable.delete_version_object(
                                            bucket, version.version_id,
                                            s3_object_path, rgw_conn, each_user)
                                    else:
                                        raise TestExecError("version deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                            log.info("available versions for the object")
                            versions = bucket.object_versions.filter(Prefix=s3_object_name)
                            for version in versions:
                                log.info("key_name: %s --> version_id: %s"
                                         % (version.object_key, version.version_id))
                        if config.test_ops.get("delete_from_extra_user") is True:
                            log.info("trying to delete objects from extra user")
                            s3_obj = s3lib.resource_op({
                                "obj": extra_user_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" % s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s"
                                         % version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj": s3_obj,
                                    "resource": "delete",
                                    "kwargs": dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not False:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted")
                                        write_key_io_info.delete_version_info(
                                            each_user["access_key"], bucket.name,
                                            s3_object_path, version.version_id)
                                        raise TestExecError(
                                            "version deleted, this should not happen")
                                    else:
                                        log.info("version did not delete, expected behaviour")
                                else:
                                    log.info("version did not delete, expected behaviour")
                        if config.local_file_delete is True:
                            log.info("deleting local file")
                            utils.exec_shell_cmd("sudo rm -rf %s" % s3_object_path)
                if config.test_ops["suspend_version"] is True:
                    log.info("suspending versioning")
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None,
                    })
                    response = HttpResponseParser(suspend_version_status)
                    if response.status_code == 200:
                        log.info("versioning suspended")
                        write_bucket_io_info.add_versioning_status(
                            each_user["access_key"], bucket.name,
                            VERSIONING_STATUS["SUSPENDED"])
                    else:
                        raise TestExecError("version suspend failed")
                    # getting all objects in the bucket
                    log.info("getting all objects in the bucket")
                    objects = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "objects",
                        "args": None,
                    })
                    log.info("objects: %s" % objects)
                    all_objects = s3lib.resource_op({
                        "obj": objects,
                        "resource": "all",
                        "args": None,
                    })
                    log.info("all objects: %s" % all_objects)
                    log.info("all objects2: %s" % bucket.objects.all())
                    for obj in all_objects:
                        log.info("object_name: %s" % obj.key)
                        versions = bucket.object_versions.filter(Prefix=obj.key)
                        log.info("displaying all versions of the object")
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s"
                                     % (version.object_key, version.version_id))
                if config.test_ops.get("suspend_from_extra_user") is True:
                    log.info("suspending versioning from extra user")
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    bucket_versioning = s3lib.resource_op({
                        "obj": extra_user_conn,
                        "resource": "BucketVersioning",
                        "args": [bucket.name],
                    })
                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None,
                    })
                    if suspend_version_status is not False:
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info("versioning suspended")
                            write_bucket_io_info.add_versioning_status(
                                each_user["access_key"], bucket.name,
                                VERSIONING_STATUS["SUSPENDED"])
                            raise TestExecError(
                                "version suspended, this should not happen")
                    else:
                        log.info("versioning not suspended, expected behaviour")
                if config.test_ops.get("upload_after_suspend") is True:
                    log.info("trying to upload after suspending versioning on bucket")
                    for oc, s3_object_size in list(config.mapped_sizes.items()):
                        # non versioning upload
                        s3_object_name = s3_object_names[oc] + ".after_version_suspending"
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        non_version_data_info = manage_data.io_generator(
                            s3_object_path,
                            s3_object_size,
                            op="append",
                            **{"message": "\nhello for non version\n"})
                        if non_version_data_info is False:
                            raise TestExecError("data creation failed")
                        log.info("uploading s3 object: %s" % s3_object_path)
                        upload_info = dict(
                            {
                                "access_key": each_user["access_key"],
                                "versioning_status": "suspended",
                            }, **non_version_data_info)
                        s3_obj = s3lib.resource_op({
                            "obj": bucket,
                            "resource": "Object",
                            "args": [s3_object_name],
                            "extra_info": upload_info,
                        })
                        object_uploaded_status = s3lib.resource_op({
                            "obj": s3_obj,
                            "resource": "upload_file",
                            "args": [non_version_data_info["name"]],
                            "extra_info": upload_info,
                        })
                        if object_uploaded_status is False:
                            raise TestExecError(
                                "Resource execution failed: object upload failed")
                        if object_uploaded_status is None:
                            log.info("object uploaded")
                            s3_obj = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("version_id: %s" % s3_obj.version_id)
                            if s3_obj.version_id is None:
                                log.info("Versions are not created after suspending")
                            else:
                                raise TestExecError(
                                    "Versions are created even after suspending")
                        s3_object_download_path = os.path.join(
                            TEST_DATA_PATH, s3_object_name + ".download")
                        object_downloaded_status = s3lib.resource_op({
                            "obj": bucket,
                            "resource": "download_file",
                            "args": [s3_object_name, s3_object_download_path],
                        })
                        if object_downloaded_status is False:
                            raise TestExecError(
                                "Resource execution failed: object download failed")
                        if object_downloaded_status is None:
                            log.info("object downloaded")
                        # checking md5 of the downloaded file
                        s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                        log.info("s3_object_downloaded_md5: %s" % s3_object_downloaded_md5)
                        log.info("s3_object_uploaded_md5: %s" % non_version_data_info["md5"])
                        if config.local_file_delete is True:
                            utils.exec_shell_cmd("sudo rm -rf %s" % s3_object_path)
            if config.test_ops.get("delete_bucket") is True:
                reusable.delete_bucket(bucket)
    # check sync status if a multisite cluster
    reusable.check_sync_status()
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
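
# --- Illustrative sketch (not part of the test suite) ---
# A minimal, self-contained example of the versioning behaviour exercised above:
# enable versioning on a bucket, upload the same key twice, and confirm that two
# version ids exist. The endpoint, credentials and bucket/key names below are
# placeholders, not values used by this framework.
import boto3


def versioning_sketch(endpoint, access_key, secret_key, bucket_name="demo-bucket"):
    s3 = boto3.resource(
        "s3",
        endpoint_url=endpoint,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    bucket = s3.Bucket(bucket_name)
    bucket.create()
    bucket.Versioning().enable()
    # two uploads of the same key should produce two distinct versions
    bucket.put_object(Key="demo-key", Body=b"version one")
    bucket.put_object(Key="demo-key", Body=b"version two")
    version_ids = [v.version_id for v in bucket.object_versions.filter(Prefix="demo-key")]
    assert len(version_ids) == 2, "expected two versions after two uploads"
    return version_ids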
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() log.info("starting IO") config.user_count = 1 user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() log.info("sharding configuration will be added now.") if config.sharding_type == "dynamic": log.info("sharding type is dynamic") # for dynamic, # the number of shards should be greater than [ (no of objects)/(max objects per shard) ] # example: objects = 500 ; max object per shard = 10 # then no of shards should be at least 50 or more time.sleep(15) log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf( "global", ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard), ) ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_dynamic_resharding, "True") num_shards_expected = config.objects_count / config.max_objects_per_shard log.info("num_shards_expected: %s" % num_shards_expected) log.info("trying to restart services ") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") config.bucket_count = 1 objects_created_list = [] log.info("no of buckets to create: %s" % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=1) bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) if config.test_ops.get("enable_version", False): reusable.upload_version_object( config, user_info, rgw_conn, s3_object_name, config.obj_size, bucket, TEST_DATA_PATH, ) else: reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) objects_created_list.append((s3_object_name, s3_object_path)) if config.sharding_type == "manual": log.info("sharding type is manual") # for manual. # the number of shards will be the value set in the command. 
time.sleep(15) log.info("in manual sharding") cmd_exec = utils.exec_shell_cmd( "radosgw-admin bucket reshard --bucket=%s --num-shards=%s " "--yes-i-really-mean-it" % (bucket.name, config.shards)) if cmd_exec is False: raise TestExecError("manual resharding command execution failed") sleep_time = 600 log.info(f"verification starts after waiting for {sleep_time} seconds") time.sleep(sleep_time) op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket %s" % bucket.name) json_doc = json.loads(op) num_shards_created = json_doc["num_shards"] log.info("no_of_shards_created: %s" % num_shards_created) if config.sharding_type == "manual": if config.shards != num_shards_created: raise TestExecError("expected number of shards not created") log.info("Expected number of shards created") if config.sharding_type == "dynamic": log.info("Verify if resharding list is empty") reshard_list_op = json.loads( utils.exec_shell_cmd("radosgw-admin reshard list")) if not reshard_list_op: log.info( "for dynamic number of shards created should be greater than or equal to number of expected shards" ) log.info("no_of_shards_expected: %s" % num_shards_expected) if int(num_shards_created) >= int(num_shards_expected): log.info("Expected number of shards created") else: raise TestExecError("Expected number of shards not created") if config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, user_info) else: reusable.delete_objects(bucket) reusable.delete_bucket(bucket) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
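
# --- Illustrative sketch (not part of the test suite) ---
# The dynamic-resharding check above expects
#   num_shards_created >= objects_count / max_objects_per_shard.
# A small helper showing the same verification with an explicit ceiling, reading
# "num_shards" from `radosgw-admin bucket stats` output in the same way the test
# does. The bucket name is a placeholder.
import json
import math
import subprocess


def verify_dynamic_resharding(bucket_name, objects_count, max_objects_per_shard):
    expected = math.ceil(objects_count / max_objects_per_shard)
    out = subprocess.check_output(
        ["radosgw-admin", "bucket", "stats", "--bucket", bucket_name])
    created = json.loads(out)["num_shards"]
    if created < expected:
        raise RuntimeError("expected at least %s shards, found %s" % (expected, created))
    return created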
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() config.rgw_lc_debug_interval = 30 config.rgw_lc_max_worker = 10 log.info('making changes to ceph.conf') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval)) ceph_version = utils.exec_shell_cmd("ceph version") op = ceph_version.split() for i in op: if i == 'nautilus': ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_max_worker, str(config.rgw_lc_max_worker)) log.info('trying to restart services') srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') config.user_count = 1 config.bucket_count = 1 # create user user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() log.info('no of buckets to create: %s' % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1) obj_list = [] obj_tag = 'suffix1=WMV1' bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) prefix = list(map(lambda x: x, [rule['Filter'].get('Prefix') or rule['Filter']['And'].get('Prefix') for rule in config.lifecycle_conf])) prefix = prefix if prefix else ['dummy1'] if config.test_ops['enable_versioning'] is True: reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) if config.test_ops['create_object'] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + '.' + bucket.name + '.' + str(oc) obj_list.append(s3_object_name) if config.test_ops['version_count'] > 0: for vc in range(config.test_ops['version_count']): log.info('version count for %s is %s' % (s3_object_name, str(vc))) log.info('modifying data: %s' % s3_object_name) reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, append_data=True, append_msg='hello object for version: %s\n' % str(vc)) else: log.info('s3 objects to create: %s' % config.objects_count) reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config) lc_ops.validate_prefix_rule(bucket, config) if config.test_ops['delete_marker'] is True: life_cycle_rule_new = {"Rules": config.delete_marker_ops} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config) if config.test_ops['enable_versioning'] is False: if config.test_ops['create_object'] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + '.' + bucket.name + '.' + str(oc) obj_list.append(s3_object_name) reusable.upload_object_with_tagging(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, obj_tag) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config) lc_ops.validate_and_rule(bucket, config) reusable.remove_user(user_info)
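
# --- Illustrative sketch (not part of the test suite) ---
# The lifecycle test above is driven by config.lifecycle_conf, a list of S3
# lifecycle rules. Below is one plausible rule in the shape the prefix logic
# expects (either Filter.Prefix or Filter.And.Prefix), applied with the standard
# boto3 client call; the rule values and bucket name are placeholder assumptions.
example_lifecycle_conf = [
    {
        "ID": "expire-key1-objects",
        "Filter": {"Prefix": "key1"},
        "Status": "Enabled",
        "Expiration": {"Days": 1},
    }
]


def apply_lifecycle_sketch(s3_client, bucket_name, rules=example_lifecycle_conf):
    # s3_client is assumed to be a boto3 S3 client (the rgw_conn2-style client used above)
    s3_client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration={"Rules": rules})
    return s3_client.get_bucket_lifecycle_configuration(Bucket=bucket_name)["Rules"]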
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) write_bucket_io_info = BucketIoInfo() write_key_io_info = KeyIoInfo() version_count = 3 # create user s3_user = s3lib.create_users(1)[0] # authenticate auth = Auth(s3_user, ssl=config.ssl) rgw_conn = auth.do_auth() b1_name = utils.gen_bucket_name_from_userid(s3_user["user_id"], rand_no=1) b1_k1_name = b1_name + ".key.1" # key1 b1_k2_name = b1_name + ".key.2" # key2 b2_name = utils.gen_bucket_name_from_userid(s3_user["user_id"], rand_no=2) b2_k1_name = b2_name + ".key.1" # key1 b2_k2_name = b2_name + ".key.2" # key2 b1 = reusable.create_bucket(b1_name, rgw_conn, s3_user) b2 = reusable.create_bucket(b2_name, rgw_conn, s3_user) # enable versioning on b1 reusable.enable_versioning(b1, rgw_conn, s3_user, write_bucket_io_info) # upload object to version enabled bucket b1 obj_sizes = list(config.mapped_sizes.values()) config.obj_size = obj_sizes[0] for vc in range(version_count): reusable.upload_object( b1_k1_name, b1, TEST_DATA_PATH, config, s3_user, append_data=True, append_msg="hello vc count: %s" % str(vc), ) # upload object to non version bucket b2 config.obj_size = obj_sizes[1] reusable.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config, s3_user) # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present b1_k2 = s3lib.resource_op({ "obj": rgw_conn, "resource": "Object", "args": [b1.name, b1_k2_name] }) b2_k2 = s3lib.resource_op({ "obj": rgw_conn, "resource": "Object", "args": [b2.name, b2_k2_name] }) log.info( "copy from b2_k1 key to b1_k2 key to bucket 1 -> version enabled bucket" ) copy_response = b1_k2.copy_from(CopySource={ "Bucket": b2.name, "Key": b2_k1_name, }) log.info("copy_response: %s" % copy_response) if copy_response is None: raise TestExecError("copy object failed") log.info("checking if copies object has version id created") b1_k2_version_id = b1_k2.version_id log.info("version id: %s" % b1_k2_version_id) if b1_k2_version_id is None: raise TestExecError( "Version ID not created for the copied object on to the versioned enabled bucket" ) else: log.info( "Version ID created for the copied object on to the versioned bucket" ) all_objects_in_b1 = b1.objects.all() log.info("all objects in bucket 1") for obj in all_objects_in_b1: log.info("object_name: %s" % obj.key) versions = b1.object_versions.filter(Prefix=obj.key) log.info("displaying all versions of the object") for version in versions: log.info("key_name: %s --> version_id: %s" % (version.object_key, version.version_id)) log.info("-------------------------------------------") log.info("copy from b1_k1 key to b2_k2 to bucket 2 -> non version bucket") copy_response = b2_k2.copy_from(CopySource={ "Bucket": b1.name, "Key": b1_k1_name, }) log.info("copy_response: %s" % copy_response) if copy_response is None: raise TestExecError("copy object failed") log.info("checking if copies object has version id created") b2_k2_version_id = b2_k2.version_id log.info("version id: %s" % b2_k2_version_id) if b2_k2_version_id is None: log.info( "Version ID not created for the copied object on to the non versioned bucket" ) else: raise TestExecError( "Version ID created for the copied object on to the non versioned bucket" ) all_objects_in_b2 = b2.objects.all() log.info("all objects in bucket 2") for obj in all_objects_in_b2: 
log.info("object_name: %s" % obj.key) versions = b2.object_versions.filter(Prefix=obj.key) log.info("displaying all versions of the object") for version in versions: log.info("key_name: %s --> version_id: %s" % (version.object_key, version.version_id)) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    objects_created_list = []
    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')
    # making changes to max_objects_per_shard and rgw_gc_obj_min_wait in ceph.conf
    log.info('making changes to ceph.conf')
    log.info(f'rgw_max_objs_per_shard parameter set to {str(config.max_objects_per_shard)}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard,
                               str(config.max_objects_per_shard))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, 'True')
    log.info(f'rgw gc obj min wait configuration parameter set to {str(config.rgw_gc_obj_min_wait)}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,
                               str(config.rgw_gc_obj_min_wait))
    sleep_time = 10
    log.info(f'Restarting RGW service and waiting for {sleep_time} seconds')
    srv_restarted = rgw_service.restart()
    time.sleep(sleep_time)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                log.info(f'creating {str(bc)} bucket')
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('enable_version', False):
                            log.info('upload versioned objects')
                            reusable.upload_version_object(config, each_user, rgw_conn,
                                                           s3_object_name, config.obj_size,
                                                           bucket, TEST_DATA_PATH)
                        else:
                            log.info('upload type: normal')
                            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                                   config, each_user)
                        objects_created_list.append((s3_object_name, s3_object_path))
                        # deleting the local file created after upload
                        if config.local_file_delete is True:
                            log.info('deleting local file created after the upload')
                            utils.exec_shell_cmd('rm -rf %s' % s3_object_path)
                # listing the objects
                if config.test_ops.get('list_objects', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            reusable.list_versioned_objects(bucket, name, path, rgw_conn)
                    else:
                        reusable.list_objects(bucket)
                if config.test_ops.get('delete_bucket_object', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            log.info('object name: %s, path: %s' % (name, path))
                            versions = bucket.object_versions.filter(Prefix=name)
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({
                                'obj': rgw_conn,
                                'resource': 'Object',
                                'args': [bucket.name, name],
                            })
                            log.info('deleting versions for s3 obj: %s' % name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    'obj': s3_obj,
                                    'resource': 'delete',
                                    'kwargs': dict(VersionId=version.version_id),
                                })
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted')
                                        reusable.delete_version_object(bucket, version.version_id,
                                                                       path, rgw_conn, each_user)
                                    else:
                                        raise TestExecError("version deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                    else:
                        reusable.delete_objects(bucket)
                    log.info(f'deleting the bucket {bucket_name_to_create}')
                    reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    # remove the user
    reusable.remove_user(each_user)
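
# --- Illustrative sketch (not part of the test suite) ---
# The versioned-delete path above walks bucket.object_versions and deletes each
# version id individually. The same idea in plain boto3, independent of the
# framework helpers; a 204 per deleted version is the expected outcome. `bucket`
# is assumed to be a boto3 Bucket resource.
def delete_all_versions(bucket, key):
    deleted = []
    for version in list(bucket.object_versions.filter(Prefix=key)):
        resp = version.delete()  # DeleteObject with VersionId=version.version_id
        deleted.append((version.version_id,
                        resp["ResponseMetadata"]["HTTPStatusCode"]))
    return deleted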
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get('encryption_algorithm', None) is not None: log.info('encryption enabled, making ceph config changes') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{'signature_version': 's3v4'}) else: rgw_conn = auth.do_auth() objects_created_list = [] if config.test_ops['create_bucket'] is True: log.info('no of buckets to create: %s' % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user['user_id'], rand_no=bc) log.info('creating bucket with name: %s' % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops.get('enable_version', False): log.info('enable bucket version') reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info) if config.test_ops['create_object'] is True: if config.test_ops['object_structure'] == 'flat': # uploading data log.info('top level s3 objects to create: %s' % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info('s3 object name: %s' % s3_object_name) s3_object_path = os.path.join( TEST_DATA_PATH, s3_object_name) log.info('s3 object path: %s' % s3_object_path) if config.test_ops.get( 'upload_type') == 'multipart': log.info('upload type: multipart') reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user) else: log.info('upload type: normal') reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) objects_created_list.append( (s3_object_name, s3_object_path)) #deleting the local file created after upload if config.local_file_delete is True: log.info( 'deleting local file created after the upload' ) utils.exec_shell_cmd('rm -rf %s' % s3_object_path) #this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72 if config.test_ops['object_structure'] == 'pseudo': log.info( f'pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each' ) for count in range(config.pseudo_dir_count): s3_pseudo_dir_name = utils.gen_s3_object_name( bucket_name_to_create, count) s3_object_path = os.path.join( TEST_DATA_PATH, s3_pseudo_dir_name) manage_data.pseudo_dir_generator(s3_object_path) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_pseudo_object_name( s3_pseudo_dir_name, oc) log.info('s3 object name: %s' % s3_object_name) s3_object_path = os.path.join( TEST_DATA_PATH, s3_object_name) log.info('s3 object path: %s' % s3_object_path) if config.test_ops.get( 'upload_type') == 'multipart': log.info('upload type: multipart') reusable.upload_mutipart_object( s3_object_name, bucket, 
TEST_DATA_PATH, config, each_user) else: log.info('upload type: normal') reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user) #deleting the local file created after upload if config.local_file_delete is True: log.info( 'deleting local file created after the upload' ) utils.exec_shell_cmd('rm -rf %s' % s3_object_path) # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0 if config.test_ops['create_object'] is False: if config.test_ops[ 'object_structure'] == 'pseudo-dir-only': log.info( f'pseudo directories to create {config.pseudo_dir_count}' ) for count in range(config.pseudo_dir_count): s3_pseudo_dir_name = utils.gen_s3_object_name( bucket_name_to_create, count) utils.create_psuedo_dir(s3_pseudo_dir_name, bucket) # radoslist listing of the bucket if config.test_ops['radoslist'] is True: log.info( 'executing the command radosgw-admin bucket radoslist ' ) radoslist = utils.exec_shell_cmd( "radosgw-admin bucket radoslist --bucket %s" % bucket_name_to_create) if radoslist is False: raise TestExecError( "Radoslist command execution failed") # get the configuration parameter cmd = 'ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep rgw_bucket_index_max_aio' max_aio_output = utils.exec_shell_cmd(cmd) max_aio = max_aio_output.split()[1] # bucket stats to get the num_objects of the bucket bucket_stats = utils.exec_shell_cmd( "radosgw-admin bucket stats --bucket %s" % bucket_name_to_create) bucket_stats_json = json.loads(bucket_stats) bkt_num_objects = bucket_stats_json['usage']['rgw.main'][ 'num_objects'] # ordered listing via radosgw-admin command and noting time taken log.info( 'measure the execution time taken to list via radosgw-admin command' ) if config.test_ops['radosgw_listing_ordered'] is True: log.info('ordered listing via radosgw-admin command') rgw_cmd_time = reusable.time_to_list_via_radosgw( bucket_name_to_create, 'ordered') if rgw_cmd_time > 0: rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time) rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60) log.info( f'with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins' ) else: raise TestExecError( "object listing via radosgw-admin command failed") # unordered listing via radosgw-admin command and noting time taken if config.test_ops['radosgw_listing_ordered'] is False: log.info('unordered listing via radosgw-admin command') rgw_time = reusable.time_to_list_via_radosgw( bucket_name_to_create, 'unordered') if rgw_time > 0: rgw_time_secs = "{:.4f}".format(rgw_time) rgw_time_mins = "{:.4f}".format(rgw_time / 60) log.info( f'with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins' ) else: raise TestExecError( "object listing via radosgw-admin command failed") # listing via boto and noting the time taken log.info('measure the execution time taken to list via boto') boto_time = reusable.time_to_list_via_boto( bucket_name_to_create, rgw_conn) if boto_time > 0: boto_time_secs = "{:.4f}".format(boto_time) boto_time_mins = "{:.4f}".format(boto_time / 60) log.info( f'with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins' ) else: raise TestExecError("object listing via boto failed") if 
config.test_ops.get('delete_bucket_object', False): if config.test_ops.get('enable_version', False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, each_user) else: reusable.delete_objects(bucket) time.sleep(30) reusable.delete_bucket(bucket) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!") if config.user_remove is True: reusable.remove_user(each_user)
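
# --- Illustrative sketch (not part of the test suite) ---
# The listing test above times both radosgw-admin and boto listings of a large
# bucket. A minimal way to time an ordered listing from the client side with a
# boto3 paginator; `s3_client` and the bucket name are placeholders.
import time


def time_ordered_listing(s3_client, bucket_name):
    start = time.perf_counter()
    count = 0
    paginator = s3_client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket_name):
        count += len(page.get("Contents", []))
    elapsed = time.perf_counter() - start
    return count, elapsed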