def __init__(self):
    self._hostname, self._ip = utils.get_hostname_ip()
    self._ssl_port = 443
    self._non_ssl_port = utils.get_radosgw_port_no()
    self._ceph_conf = CephConfOp()
    self._rgw_service = RGWService()
    # _sections_to_check = ['client.rgw.' + self._hostname,
    #                       'client.rgw.' + self._ip]
    # log.info('checking for existence of sections: {}'.format(_sections_to_check))
    # _sections = [section for section in _sections_to_check
    #              if self._ceph_conf.check_if_section_exists(section)]
    # log.info('got section(s): {}'.format(_sections))
    # if not any(_sections):
    #     raise RGWBaseException('No RGW section in ceph.conf')
    # self.section = _sections[0]
    sections_in_ceph_conf = self._ceph_conf.cfg.sections()
    log.info('got sections from ceph_conf: {}'.format(sections_in_ceph_conf))
    rgw_section = list(
        filter(lambda section: 'rgw' in section, sections_in_ceph_conf))
    if not rgw_section:
        raise RGWBaseException('No RGW section in ceph.conf')
    self.section = rgw_section[0]
    log.info('using section: {}'.format(self.section))
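# Illustrative sketch (not part of the test suite): the RGW-section lookup done
# in __init__ above, reproduced with the standard-library configparser so it can
# be exercised in isolation. The conf path is an assumption for the example.
import configparser


def find_rgw_section(conf_path="/etc/ceph/ceph.conf"):
    """Return the first ceph.conf section whose name contains 'rgw'."""
    parser = configparser.ConfigParser(strict=False, allow_no_value=True)
    parser.read(conf_path)
    rgw_sections = [section for section in parser.sections() if "rgw" in section]
    if not rgw_sections:
        raise RuntimeError("No RGW section in ceph.conf")
    return rgw_sections[0]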
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    if config.cluster_type == "brownfield":
        log.info("Check whether sharding is enabled")
        cmd = "radosgw-admin zonegroup get"
        out = utils.exec_shell_cmd(cmd)
        zonegroup = json.loads(out)
        zonegroup = zonegroup.get("enabled_features")
        log.info(zonegroup)
        if "resharding" in zonegroup:
            log.info("sharding is enabled")
        else:
            log.info("sharding is not enabled")
            if config.enable_sharding is True:
                log.info("Enabling sharding on cluster since cluster is brownfield")
                cmd = "radosgw-admin zonegroup get"
                out = utils.exec_shell_cmd(cmd)
                zonegroup = json.loads(out)
                zonegroup_name = zonegroup.get("name")
                log.info(zonegroup_name)
                cmd = (
                    "radosgw-admin zonegroup modify --rgw-zonegroup=%s --enable-feature=resharding"
                    % zonegroup_name
                )
                out = utils.exec_shell_cmd(cmd)
                cmd = "radosgw-admin period update --commit"
                out = utils.exec_shell_cmd(cmd)
                cmd = "radosgw-admin zonegroup get"
                out = utils.exec_shell_cmd(cmd)
                zonegroup = json.loads(out)
                zonegroup = zonegroup.get("enabled_features")
                log.info(zonegroup)
                if "resharding" in zonegroup:
                    log.info("sharding is enabled")
                else:
                    raise TestExecError("sharding is not enabled")
            else:
                raise TestExecError("sharding is not enabled")

    if config.cluster_type == "greenfield":
        log.info("Check whether sharding is enabled")
        cmd = "radosgw-admin zonegroup get"
        out = utils.exec_shell_cmd(cmd)
        zonegroup = json.loads(out)
        zonegroup = zonegroup.get("enabled_features")
        log.info(zonegroup)
        if "resharding" in zonegroup:
            log.info("sharding is already enabled")
        else:
            raise TestExecError("sharding is not enabled")
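# Illustrative sketch (not part of the test suite): the "is resharding enabled"
# probe used above, factored into a small helper around radosgw-admin. Assumes
# radosgw-admin is available on the PATH of the node running the check.
import json
import subprocess


def resharding_enabled():
    """Return True if the current zonegroup lists 'resharding' in enabled_features."""
    out = subprocess.check_output(["radosgw-admin", "zonegroup", "get"], text=True)
    enabled_features = json.loads(out).get("enabled_features") or []
    return "resharding" in enabled_features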
def test_exec(config): """ Executes test based on configuration passed Args: config(object): Test configuration """ io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) umgmt = UserMgmt() ceph_conf = CephConfOp() rgw_service = RGWService() # preparing data user_name = resource_op.create_users(no_of_users_to_create=1)[0]["user_id"] tenant = "tenant" tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant, user_id=user_name, displayname=user_name) user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_name) hostname = socket.gethostname() ip = socket.gethostbyname(hostname) port = utils.get_radosgw_port_no() ip_and_port = f"{ip}:{port}" s3_auth.do_auth(tenant_user_info, ip_and_port) bucket_name = utils.gen_bucket_name_from_userid(user_name, rand_no=0) # Create a bucket s3cmd_reusable.create_bucket(bucket_name) log.info(f"Bucket {bucket_name} created") # Upload file to bucket uploaded_file_info = s3cmd_reusable.upload_file( bucket_name, test_data_path=TEST_DATA_PATH) uploaded_file = uploaded_file_info["name"] log.info(f"Uploaded file {uploaded_file} to bucket {bucket_name}") # Delete file from bucket s3cmd_reusable.delete_file(bucket_name, uploaded_file) log.info(f"Deleted file {uploaded_file} from bucket {bucket_name}") # Delete bucket s3cmd_reusable.delete_bucket(bucket_name) log.info(f"Bucket {bucket_name} deleted") # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() log.info('starting IO') config.user_count = 1 user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() log.info('sharding configuration will be added now.') if config.sharding_type == 'dynamic': log.info('sharding type is dynamic') # for dynamic, # the number of shards should be greater than [ (no of objects)/(max objects per shard) ] # example: objects = 500 ; max object per shard = 10 # then no of shards should be at least 50 or more time.sleep(15) log.info('making changes to ceph.conf') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard)) ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, 'True') num_shards_expected = config.objects_count / config.max_objects_per_shard log.info('num_shards_expected: %s' % num_shards_expected) log.info('trying to restart services ') srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') config.bucket_count = 1 objects_created_list = [] log.info('no of buckets to create: %s' % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1) bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) if config.test_ops.get('enable_version', False): log.info('enable bucket version') reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) log.info('s3 objects to create: %s' % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) if config.test_ops.get('enable_version', False): reusable.upload_version_object(config, user_info, rgw_conn, s3_object_name, config.obj_size, bucket, TEST_DATA_PATH) else: reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) objects_created_list.append((s3_object_name, s3_object_path)) if config.sharding_type == 'manual': log.info('sharding type is manual') # for manual. # the number of shards will be the value set in the command. 
time.sleep(15) log.info('in manual sharding') cmd_exec = utils.exec_shell_cmd( 'radosgw-admin bucket reshard --bucket=%s --num-shards=%s ' '--yes-i-really-mean-it' % (bucket.name, config.shards)) if cmd_exec is False: raise TestExecError("manual resharding command execution failed") sleep_time = 600 log.info(f'verification starts after waiting for {sleep_time} seconds') time.sleep(sleep_time) op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket.name) json_doc = json.loads(op) bucket_id = json_doc['data']['bucket']['bucket_id'] op2 = utils.exec_shell_cmd( "radosgw-admin metadata get bucket.instance:%s:%s" % (bucket.name, bucket_id)) json_doc2 = json.loads((op2)) num_shards_created = json_doc2['data']['bucket_info']['num_shards'] log.info('no_of_shards_created: %s' % num_shards_created) if config.sharding_type == 'manual': if config.shards != num_shards_created: raise TestExecError("expected number of shards not created") log.info('Expected number of shards created') if config.sharding_type == 'dynamic': log.info('Verify if resharding list is empty') reshard_list_op = json.loads( utils.exec_shell_cmd("radosgw-admin reshard list")) if not reshard_list_op: log.info( 'for dynamic number of shards created should be greater than or equal to number of expected shards' ) log.info('no_of_shards_expected: %s' % num_shards_expected) if int(num_shards_created) >= int(num_shards_expected): log.info('Expected number of shards created') else: raise TestExecError('Expected number of shards not created') if config.test_ops.get('delete_bucket_object', False): if config.test_ops.get('enable_version', False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, user_info) else: reusable.delete_objects(bucket) reusable.delete_bucket(bucket)
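# Illustrative sketch (not part of the test suite): the dynamic-resharding
# expectation spelled out in the comments above, i.e. the final shard count
# should reach at least objects_count / max_objects_per_shard.
import math


def expected_min_shards(objects_count, max_objects_per_shard):
    """Minimum number of shards dynamic resharding is expected to reach."""
    return math.ceil(objects_count / max_objects_per_shard)


# Example from the comment above: 500 objects at 10 objects per shard
# should end up with at least 50 shards.
assert expected_min_shards(500, 10) == 50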
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_config_set = CephConfOp() rgw_service = RGWService() if config.sts is None: raise TestExecError("sts policies are missing in yaml config") # create users config.user_count = 2 users_info = s3lib.create_users(config.user_count) # user1 is the owner user1, user2 = users_info[0], users_info[1] log.info("adding sts config to ceph.conf") sesison_encryption_token = "abcdefghijklmnoq" ceph_config_set.set_to_ceph_conf( "global", ConfigOpts.rgw_sts_key, sesison_encryption_token ) ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") auth = Auth(user1, ssl=config.ssl) iam_client = auth.do_auth_iam_client() policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "") policy_document = policy_document.replace("<user_name>", user2["user_id"]) role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "") add_caps_cmd = ( 'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format( user_id=user1["user_id"] ) ) utils.exec_shell_cmd(add_caps_cmd) role_name = f"S3RoleOf.{user1['user_id']}" log.info(f"role_name: {role_name}") log.info("creating role") create_role_response = iam_client.create_role( AssumeRolePolicyDocument=policy_document, Path="/", RoleName=role_name, ) log.info("create_role_response") log.info(create_role_response) policy_name = f"policy.{user1['user_id']}" log.info(f"policy_name: {policy_name}") log.info("putting role policy") put_policy_response = iam_client.put_role_policy( RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy ) log.info("put_policy_response") log.info(put_policy_response) auth = Auth(user2, ssl=config.ssl) sts_client = auth.do_auth_sts_client() log.info("assuming role") assume_role_response = sts_client.assume_role( RoleArn=create_role_response["Role"]["Arn"], RoleSessionName=user1["user_id"], DurationSeconds=3600, ) log.info(assume_role_response) assumed_role_user_info = { "access_key": assume_role_response["Credentials"]["AccessKeyId"], "secret_key": assume_role_response["Credentials"]["SecretAccessKey"], "session_token": assume_role_response["Credentials"]["SessionToken"], "user_id": user2["user_id"], } log.info("got the credentials after assume role") s3client = Auth(assumed_role_user_info, ssl=config.ssl) s3_client_rgw = s3client.do_auth() io_info_initialize.initialize(basic_io_structure.initial()) write_user_info = AddUserInfo() basic_io_structure = BasicIOInfoStructure() user_info = basic_io_structure.user( **{ "user_id": assumed_role_user_info["user_id"], "access_key": assumed_role_user_info["access_key"], "secret_key": assumed_role_user_info["secret_key"], } ) write_user_info.add_user_info(user_info) buckets_created = [] if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name = utils.gen_bucket_name_from_userid( assumed_role_user_info["user_id"], rand_no=bc ) log.info("creating bucket with name: %s" % bucket_name) bucket = reusable.create_bucket( bucket_name, s3_client_rgw, assumed_role_user_info ) buckets_created.append(bucket) if config.test_ops["create_object"] is True: for bucket in buckets_created: # uploading data log.info("s3 objects to 
create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, assumed_role_user_info, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, assumed_role_user_info, ) if config.test_ops["server_side_copy"] is True: bucket1, bucket2 = buckets_created # copy object1 from bucket1 to bucket2 with the same name as in bucket1 log.info("copying first object from bucket1 to bucket2") all_keys_in_buck1 = [] for obj in bucket1.objects.all(): all_keys_in_buck1.append(obj.key) copy_source = {"Bucket": bucket1.name, "Key": all_keys_in_buck1[0]} copy_object_name = all_keys_in_buck1[0] + "_copied_obj" log.info(f"copy object name: {copy_object_name}") bucket2.copy(copy_source, copy_object_name) # list the objects in bucket2 log.info("listing all objects im bucket2 after copy") all_bucket2_objs = [] for obj in bucket2.objects.all(): log.info(obj.key) all_bucket2_objs.append(obj.key) # check for object existence in bucket2 if copy_object_name in all_bucket2_objs: log.info("server side copy successful") else: raise TestExecError("server side copy operation was not successful") # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() # authenticate sns client. rgw_sns_conn = auth.do_auth_sns_client() # authenticate with s3 client rgw_s3_client = auth.do_auth_using_client() # get ceph version ceph_version_id, ceph_version_name = utils.get_ceph_version() objects_created_list = [] if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info) # create topic with endpoint if config.test_ops["create_topic"] is True: endpoint = config.test_ops.get("endpoint") ack_type = config.test_ops.get("ack_type") topic_id = str(uuid.uuid4().hex[:16]) persistent = False topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id log.info( f"creating a topic with {endpoint} endpoint with ack type {ack_type}" ) if config.test_ops.get("persistent_flag", False): log.info("topic with peristent flag enabled") persistent = config.test_ops.get("persistent_flag") topic = notification.create_topic(rgw_sns_conn, endpoint, ack_type, topic_name, persistent) # get topic attributes if config.test_ops.get("get_topic_info", False): log.info("get topic attributes") get_topic_info = notification.get_topic( rgw_sns_conn, topic, ceph_version_name) # put bucket notification with topic configured for event if config.test_ops["put_get_bucket_notification"] is True: event = config.test_ops.get("event_type") notification_name = "notification-" + str(event) notification.put_bucket_notification( rgw_s3_client, bucket_name_to_create, notification_name, topic, event, ) # get bucket notification log.info( f"get bucket notification for bucket : {bucket_name_to_create}" ) notification.get_bucket_notification( rgw_s3_client, bucket_name_to_create) # create objects if config.test_ops["create_object"] is True: # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) # copy objects if config.test_ops.get("copy_object", False): log.info("copy object") status = rgw_s3_client.copy_object( Bucket=bucket_name_to_create, Key="copy_of_object" + s3_object_name, CopySource={ "Bucket": bucket_name_to_create, "Key": s3_object_name, }, ) if status is None: raise TestExecError("copy object failed") # delete objects if 
config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, each_user) else: reusable.delete_objects(bucket) # start kafka broker and consumer event_record_path = "/home/cephuser/event_record" start_consumer = notification.start_kafka_broker_consumer( topic_name, event_record_path) if start_consumer is False: raise TestExecError("Kafka consumer not running") # verify all the attributes of the event record. if event not received abort testcase log.info("verify event record attributes") verify = notification.verify_event_record(event, bucket_name_to_create, event_record_path, ceph_version_name) if verify is False: raise EventRecordDataError( "Event record is empty! notification is not seen") # delete topic logs on kafka broker notification.del_topic_from_kafka_broker(topic_name) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): """ Executes test based on configuration passed Args: config(object): Test configuration """ io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) umgmt = UserMgmt() ceph_conf = CephConfOp() rgw_service = RGWService() # preparing data user_name = resource_op.create_users(no_of_users_to_create=1)[0]["user_id"] tenant = "tenant" tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant, user_id=user_name, displayname=user_name) umgmt.create_subuser(tenant_name=tenant, user_id=user_name) ip_and_port = s3cmd_reusable.get_rgw_ip_and_port() s3_auth.do_auth(tenant_user_info, ip_and_port) bucket_name = utils.gen_bucket_name_from_userid(user_name, rand_no=0) # Create a bucket s3cmd_reusable.create_bucket(bucket_name) log.info(f"Bucket {bucket_name} created") # Upload a 2GB file to bucket uploaded_file_info = s3cmd_reusable.upload_file( bucket_name, file_size=2147483648, test_data_path=TEST_DATA_PATH) uploaded_file = uploaded_file_info["name"] uploaded_file_md5 = uploaded_file_info["md5"] log.info(f"Uploaded file {uploaded_file} to bucket {bucket_name}") if config.gc_verification is True: log.info("making changes to ceph.conf") config.rgw_gc_obj_min_wait = 5 ceph_conf.set_to_ceph_conf( "global", ConfigOpts.rgw_gc_obj_min_wait, str(config.rgw_gc_obj_min_wait), ) log.info("trying to restart services") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") log.info( "download large object again to make gc list with shadow entries") downloaded_file1 = s3cmd_reusable.download_file( bucket_name, uploaded_file, local_file_name="download1.img", test_data_path=TEST_DATA_PATH, ) time.sleep(5) downloaded_file1_md5 = utils.get_md5(downloaded_file1) assert uploaded_file_md5 == downloaded_file1_md5 gc_list_output = json.loads( utils.exec_shell_cmd("radosgw-admin gc list --include-all")) log.info(gc_list_output) if gc_list_output: log.info( "Shadow obj found after setting rgw_gc_obj_min_wait to 5 sec") utils.exec_shell_cmd("radosgw-admin gc process --include-all") log.info( "Object download should not error out in 404 NoSuchKey error") downloaded_file2 = s3cmd_reusable.download_file( bucket_name, uploaded_file, local_file_name="download2.img", test_data_path=TEST_DATA_PATH, ) downloaded_file2_md5 = utils.get_md5(downloaded_file2) assert uploaded_file_md5 == downloaded_file2_md5 # Delete file from bucket s3cmd_reusable.delete_file(bucket_name, uploaded_file) log.info(f"Deleted file {uploaded_file} from bucket {bucket_name}") # Delete bucket s3cmd_reusable.delete_bucket(bucket_name) log.info(f"Bucket {bucket_name} deleted") # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # check the default data log backing default_data_log = reusable.get_default_datalog_type() log.info(f"{default_data_log} is the default data log backing") # check sync status if a multisite cluster reusable.check_sync_status() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) rgw_conn = auth.do_auth() objects_created_list = [] # change the default datalog backing to FIFO if config.test_ops.get("change_datalog_backing", False): logtype = config.test_ops["change_datalog_backing"] log.info(f"change default datalog backing to {logtype}") cmd = f"radosgw-admin datalog type --log-type={logtype}" change_datalog_type = utils.exec_shell_cmd(cmd) if change_datalog_type is False: raise TestExecError("Failed to change the datalog type to fifo") log.info( "restart the rgw daemons and sleep of 30secs for rgw daemon to be up " ) srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc ) log.info("creating bucket with name: %s" % bucket_name_to_create) bucket = reusable.create_bucket( bucket_name_to_create, rgw_conn, each_user ) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning( bucket, rgw_conn, each_user, write_bucket_io_info ) if config.test_ops["create_object"] is True: # uploading data log.info( "top level s3 objects to create: %s" % config.objects_count ) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc ) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) objects_created_list.append((s3_object_name, s3_object_path)) # deleting the local file created after upload if config.local_file_delete is True: log.info("deleting local file created after the upload") utils.exec_shell_cmd("rm -rf %s" % s3_object_path) # delete object and bucket if config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object( bucket, name, path, rgw_conn, each_user ) else: reusable.delete_objects(bucket) time.sleep(30) reusable.delete_bucket(bucket) # check for any ERRORs in datalog list. ref- https://bugzilla.redhat.com/show_bug.cgi?id=1917687 error_in_data_log_list = reusable.check_datalog_list() if error_in_data_log_list: raise TestExecError("Error in datalog list") # check for data log markers. 
ref: https://bugzilla.redhat.com/show_bug.cgi?id=1831798#c22 data_log_marker = reusable.check_datalog_marker() log.info(f"The data_log_marker is: {data_log_marker}") # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash()
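# Illustrative sketch (not part of the test suite): a minimal version of the
# datalog sanity check referenced above. Treating any "ERROR" string in the
# listing as a failure is an assumption for the example; the real check lives
# in the reusable helpers.
import subprocess


def datalog_list_has_errors():
    out = subprocess.check_output(["radosgw-admin", "datalog", "list"], text=True)
    return "ERROR" in out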
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user if config.dbr_scenario == "brownfield": user_brownfiled = "brownfield_user" all_users_info = s3lib.create_users(config.user_count, user_brownfiled) else: all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get("encryption_algorithm", None) is not None: log.info("encryption enabled, making ceph config changes") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) else: rgw_conn = auth.do_auth() # enabling sharding if config.test_ops["sharding"]["enable"] is True: log.info("enabling sharding on buckets") max_shards = config.test_ops["sharding"]["max_shards"] log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf( "global", ConfigOpts.rgw_override_bucket_index_max_shards, str(max_shards), ) log.info("trying to restart services ") srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") if config.test_ops["compression"]["enable"] is True: compression_type = config.test_ops["compression"]["type"] log.info("enabling compression") cmd = "radosgw-admin zone get" out = utils.exec_shell_cmd(cmd) zone = json.loads(out) zone = zone.get("name") cmd = ( "radosgw-admin zone placement modify --rgw-zone=%s " "--placement-id=default-placement --compression=%s" % (zone, compression_type) ) out = utils.exec_shell_cmd(cmd) ceph_version = utils.exec_shell_cmd("ceph version").split()[4] try: data = json.loads(out) if ceph_version == "luminous": if ( data["placement_pools"][0]["val"]["compression"] == compression_type ): log.info("Compression enabled successfully") else: if ceph_version in ["nautilus", "octopus"]: if ( data["placement_pools"][0]["val"]["storage_classes"][ "STANDARD" ]["compression_type"] == compression_type ): log.info("Compression enabled successfully") except ValueError as e: exit(str(e)) log.info("trying to restart rgw services ") srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") if config.gc_verification is True: conf = config.ceph_conf reusable.set_gc_conf(ceph_conf, conf) if config.dynamic_resharding is True: if utils.check_dbr_support(): log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf( "global", ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard), ) srv_restarted = rgw_service.restart() # create buckets if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc ) if config.bucket_sync_crash is True: is_primary = utils.is_cluster_primary() if is_primary: bucket_name_to_create = "bkt_crash_check" if config.dbr_scenario == "brownfield": bucket_name_to_create = "brownfield_bucket" log.info("creating bucket with name: %s" % 
bucket_name_to_create) bucket = reusable.create_bucket( bucket_name_to_create, rgw_conn, each_user ) if config.dynamic_resharding is True: reusable.check_sync_status() op = utils.exec_shell_cmd( f"radosgw-admin bucket stats --bucket {bucket.name}" ) json_doc = json.loads(op) old_num_shards = json_doc["num_shards"] log.info(f"no_of_shards_created: {old_num_shards}") if config.test_ops["create_object"] is True: # uploading data log.info("s3 objects to create: %s" % config.objects_count) if utils.check_dbr_support(): if bucket_name_to_create == "brownfield_bucket": op = utils.exec_shell_cmd( f"radosgw-admin bucket stats --bucket {bucket.name}" ) json_doc = json.loads(op) if bool(json_doc["usage"]): num_object = json_doc["usage"]["rgw.main"][ "num_objects" ] config.objects_count = ( num_object * 2 + config.objects_count ) config.mapped_sizes = utils.make_mapped_sizes(config) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc ) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) if config.test_ops["download_object"] is True: log.info("trying to download object: %s" % s3_object_name) s3_object_download_name = s3_object_name + "." + "download" s3_object_download_path = os.path.join( TEST_DATA_PATH, s3_object_download_name ) log.info( "s3_object_download_path: %s" % s3_object_download_path ) log.info( "downloading to filename: %s" % s3_object_download_name ) if ( config.test_ops.get("encryption_algorithm", None) is not None ): log.info("encryption download") log.info( "encryption algorithm: %s" % config.test_ops["encryption_algorithm"] ) object_downloaded_status = bucket.download_file( s3_object_name, s3_object_download_path, ExtraArgs={ "SSECustomerKey": encryption_key, "SSECustomerAlgorithm": config.test_ops[ "encryption_algorithm" ], }, ) else: object_downloaded_status = s3lib.resource_op( { "obj": bucket, "resource": "download_file", "args": [ s3_object_name, s3_object_download_path, ], } ) if object_downloaded_status is False: raise TestExecError( "Resource execution failed: object download failed" ) if object_downloaded_status is None: log.info("object downloaded") s3_object_downloaded_md5 = utils.get_md5( s3_object_download_path ) s3_object_uploaded_md5 = utils.get_md5(s3_object_path) log.info( "s3_object_downloaded_md5: %s" % s3_object_downloaded_md5 ) log.info( "s3_object_uploaded_md5: %s" % s3_object_uploaded_md5 ) if str(s3_object_uploaded_md5) == str( s3_object_downloaded_md5 ): log.info("md5 match") utils.exec_shell_cmd( "rm -rf %s" % s3_object_download_path ) else: raise TestExecError("md5 mismatch") if config.local_file_delete is True: log.info("deleting local file created after the upload") utils.exec_shell_cmd("rm -rf %s" % s3_object_path) if config.bucket_sync_crash is True: is_primary = utils.is_cluster_primary() if is_primary is False: crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!") realm, source_zone = utils.get_realm_source_zone_info() log.info(f"Realm name: {realm}") log.info(f"Source zone name: {source_zone}") for i in 
range(600): # Running sync command for 600 times op = utils.exec_shell_cmd( f"radosgw-admin bucket sync run --bucket bkt_crash_check --rgw-curl-low-speed-time=0 --source-zone {source_zone} --rgw-realm {realm}" ) crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!") time.sleep(1) if config.dynamic_resharding is True: if utils.check_dbr_support(): reusable.check_sync_status() for i in range(10): time.sleep( 60 ) # Adding delay for processing reshard list op = utils.exec_shell_cmd( f"radosgw-admin bucket stats --bucket {bucket.name}" ) json_doc = json.loads(op) new_num_shards = json_doc["num_shards"] log.info(f"no_of_shards_created: {new_num_shards}") if new_num_shards > old_num_shards: break else: raise TestExecError( "num shards are same after processing resharding" ) if config.manual_resharding is True: if utils.check_dbr_support(): op = utils.exec_shell_cmd( f"radosgw-admin bucket stats --bucket {bucket.name}" ) json_doc = json.loads(op) old_num_shards = json_doc["num_shards"] log.info(f"no_of_shards_created: {old_num_shards}") op = utils.exec_shell_cmd( f"radosgw-admin reshard add --bucket {bucket.name} --num-shards {config.shards}" ) op = utils.exec_shell_cmd("radosgw-admin reshard process") time.sleep(60) op = utils.exec_shell_cmd( f"radosgw-admin bucket stats --bucket {bucket.name}" ) json_doc = json.loads(op) new_num_shards = json_doc["num_shards"] log.info(f"no_of_shards_created: {new_num_shards}") if new_num_shards <= old_num_shards: raise TestExecError( "num shards are same after processing resharding" ) # verification of shards after upload if config.test_datalog_trim_command is True: shard_id, end_marker = reusable.get_datalog_marker() cmd = f"sudo radosgw-admin datalog trim --shard-id {shard_id} --end-marker {end_marker} --debug_ms=1 --debug_rgw=20" out, err = utils.exec_shell_cmd(cmd, debug_info=True) if "Segmentation fault" in err: raise TestExecError("Segmentation fault occured") if config.test_ops["sharding"]["enable"] is True: cmd = ( "radosgw-admin metadata get bucket:%s | grep bucket_id" % bucket.name ) out = utils.exec_shell_cmd(cmd) b_id = ( out.replace('"', "") .strip() .split(":")[1] .strip() .replace(",", "") ) cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id out = utils.exec_shell_cmd(cmd2) log.info("got output from sharing verification.--------") # print out bucket stats and verify in logs for compressed data by # comparing size_kb_utilized and size_kb_actual if config.test_ops["compression"]["enable"] is True: cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name out = utils.exec_shell_cmd(cmd) # print out bucket stats and verify in logs for compressed data by # comparing size_kb_utilized and size_kb_actual if config.test_ops["compression"]["enable"] is True: cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name out = utils.exec_shell_cmd(cmd) if config.test_ops["delete_bucket_object"] is True: reusable.delete_objects(bucket) time.sleep(10) reusable.check_sync_status() reusable.delete_bucket(bucket) # disable compression after test if config.test_ops["compression"]["enable"] is True: log.info("disable compression") cmd = "radosgw-admin zone get" out = utils.exec_shell_cmd(cmd) zone = json.loads(out) zone = zone.get("name") cmd = ( "radosgw-admin zone placement modify --rgw-zone=%s " "--placement-id=default-placement --compression=none" % zone ) out = utils.exec_shell_cmd(cmd) srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service 
restart failed") else: log.info("RGW service restarted") if config.gc_verification is True: final_op = reusable.verify_gc() if final_op != -1: test_info.failed_status("test failed") sys.exit(1) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() config.rgw_lc_debug_interval = 30 config.rgw_lc_max_worker = 10 log.info('making changes to ceph.conf') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval)) ceph_version = utils.exec_shell_cmd("ceph version") op = ceph_version.split() for i in op: if i == 'nautilus': ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_max_worker, str(config.rgw_lc_max_worker)) log.info('trying to restart services') srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') config.user_count = 1 config.bucket_count = 1 # create user user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() log.info('no of buckets to create: %s' % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1) obj_list = [] obj_tag = 'suffix1=WMV1' bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) prefix = list(map(lambda x: x, [rule['Filter'].get('Prefix') or rule['Filter']['And'].get('Prefix') for rule in config.lifecycle_conf])) prefix = prefix if prefix else ['dummy1'] if config.test_ops['enable_versioning'] is True: reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) if config.test_ops['create_object'] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + '.' + bucket.name + '.' + str(oc) obj_list.append(s3_object_name) if config.test_ops['version_count'] > 0: for vc in range(config.test_ops['version_count']): log.info('version count for %s is %s' % (s3_object_name, str(vc))) log.info('modifying data: %s' % s3_object_name) reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, append_data=True, append_msg='hello object for version: %s\n' % str(vc)) else: log.info('s3 objects to create: %s' % config.objects_count) reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config) lc_ops.validate_prefix_rule(bucket, config) if config.test_ops['delete_marker'] is True: life_cycle_rule_new = {"Rules": config.delete_marker_ops} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config) if config.test_ops['enable_versioning'] is False: if config.test_ops['create_object'] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + '.' + bucket.name + '.' + str(oc) obj_list.append(s3_object_name) reusable.upload_object_with_tagging(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, obj_tag) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config) lc_ops.validate_and_rule(bucket, config) reusable.remove_user(user_info)
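# Illustrative sketch (not part of the test suite): the shape of lifecycle
# payload the test builds from config.lifecycle_conf, here with an "And" filter
# combining a prefix and the suffix1=WMV1 tag used above. Values are
# placeholders for the example.
example_life_cycle_rule = {
    "Rules": [
        {
            "ID": "prefix-and-tag-expiration",
            "Status": "Enabled",
            "Filter": {
                "And": {
                    "Prefix": "key1",
                    "Tags": [{"Key": "suffix1", "Value": "WMV1"}],
                }
            },
            "Expiration": {"Days": 1},
        }
    ]
}
# Applied with a plain boto3 client it would be:
# s3_client.put_bucket_lifecycle_configuration(
#     Bucket=bucket_name, LifecycleConfiguration=example_life_cycle_rule)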
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get("encryption_algorithm", None) is not None: log.info("encryption enabled, making ceph config changes") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) else: rgw_conn = auth.do_auth() # enabling sharding if config.test_ops["sharding"]["enable"] is True: log.info("enabling sharding on buckets") max_shards = config.test_ops["sharding"]["max_shards"] log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf( "global", ConfigOpts.rgw_override_bucket_index_max_shards, str(max_shards), ) log.info("trying to restart services ") srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") if config.test_ops["compression"]["enable"] is True: compression_type = config.test_ops["compression"]["type"] log.info("enabling compression") cmd = "radosgw-admin zone get" out = utils.exec_shell_cmd(cmd) zone = json.loads(out) zone = zone.get("name") cmd = ("radosgw-admin zone placement modify --rgw-zone=%s " "--placement-id=default-placement --compression=%s" % (zone, compression_type)) out = utils.exec_shell_cmd(cmd) ceph_version = utils.exec_shell_cmd("ceph version").split()[4] try: data = json.loads(out) if ceph_version == "luminous": if (data["placement_pools"][0]["val"]["compression"] == compression_type): log.info("Compression enabled successfully") else: if ceph_version in ["nautilus", "octopus"]: if (data["placement_pools"][0]["val"] ["storage_classes"]["STANDARD"]["compression_type"] == compression_type): log.info("Compression enabled successfully") except ValueError as e: exit(str(e)) log.info("trying to restart rgw services ") srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") if config.gc_verification is True: conf = config.ceph_conf reusable.set_gc_conf(ceph_conf, conf) # create buckets if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc) log.info("creating bucket with name: %s" % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops["create_object"] is True: # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( 
s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) if config.test_ops["download_object"] is True: log.info("trying to download object: %s" % s3_object_name) s3_object_download_name = s3_object_name + "." + "download" s3_object_download_path = os.path.join( TEST_DATA_PATH, s3_object_download_name) log.info("s3_object_download_path: %s" % s3_object_download_path) log.info("downloading to filename: %s" % s3_object_download_name) if (config.test_ops.get("encryption_algorithm", None) is not None): log.info("encryption download") log.info( "encryption algorithm: %s" % config.test_ops["encryption_algorithm"]) object_downloaded_status = bucket.download_file( s3_object_name, s3_object_download_path, ExtraArgs={ "SSECustomerKey": encryption_key, "SSECustomerAlgorithm": config. test_ops["encryption_algorithm"], }, ) else: object_downloaded_status = s3lib.resource_op({ "obj": bucket, "resource": "download_file", "args": [ s3_object_name, s3_object_download_path, ], }) if object_downloaded_status is False: raise TestExecError( "Resource execution failed: object download failed" ) if object_downloaded_status is None: log.info("object downloaded") s3_object_downloaded_md5 = utils.get_md5( s3_object_download_path) s3_object_uploaded_md5 = utils.get_md5( s3_object_path) log.info("s3_object_downloaded_md5: %s" % s3_object_downloaded_md5) log.info("s3_object_uploaded_md5: %s" % s3_object_uploaded_md5) if str(s3_object_uploaded_md5) == str( s3_object_downloaded_md5): log.info("md5 match") utils.exec_shell_cmd("rm -rf %s" % s3_object_download_path) else: raise TestExecError("md5 mismatch") if config.local_file_delete is True: log.info( "deleting local file created after the upload") utils.exec_shell_cmd("rm -rf %s" % s3_object_path) # verification of shards after upload if config.test_ops["sharding"]["enable"] is True: cmd = ( "radosgw-admin metadata get bucket:%s | grep bucket_id" % bucket.name) out = utils.exec_shell_cmd(cmd) b_id = (out.replace( '"', "").strip().split(":")[1].strip().replace(",", "")) cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id out = utils.exec_shell_cmd(cmd2) log.info( "got output from sharing verification.--------") # print out bucket stats and verify in logs for compressed data by # comparing size_kb_utilized and size_kb_actual if config.test_ops["compression"]["enable"] is True: cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name out = utils.exec_shell_cmd(cmd) # print out bucket stats and verify in logs for compressed data by # comparing size_kb_utilized and size_kb_actual if config.test_ops["compression"]["enable"] is True: cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name out = utils.exec_shell_cmd(cmd) if config.test_ops["delete_bucket_object"] is True: reusable.delete_objects(bucket) log.info( "set debug_rgw to 20 before delete the bucket") config.debug_rgw = 20 ceph_conf.set_to_ceph_conf("global", ConfigOpts.debug_rgw, str(config.debug_rgw)) log.info("trying to restart services") srv_restarted = rgw_service.restart() time.sleep(20) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") reusable.delete_bucket(bucket) # disable compression after test if config.test_ops["compression"]["enable"] is True: log.info("disable compression") cmd = "radosgw-admin zone get" out = utils.exec_shell_cmd(cmd) zone = json.loads(out) zone = zone.get("name") cmd = 
("radosgw-admin zone placement modify --rgw-zone=%s " "--placement-id=default-placement --compression=none" % zone) out = utils.exec_shell_cmd(cmd) srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") if config.gc_verification is True: final_op = reusable.verify_gc() if final_op != -1: test_info.failed_status("test failed") sys.exit(1) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() log.info('no of buckets to create: %s' % config.bucket_count) # create buckets if config.test_ops['create_bucket'] is True: for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=bc) log.info('creating bucket with name: %s' % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, user_info) if config.test_ops['create_object'] is True: # uploading data log.info('s3 objects to create: %s' % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, oc) log.info('s3 object name: %s' % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info('s3 object path: %s' % s3_object_path) if config.test_ops.get('upload_type') == 'multipart': log.info('upload type: multipart') reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) else: log.info('upload type: normal') reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) if config.gc_verification is True: log.info('making changes to ceph.conf') config.rgw_gc_obj_min_wait = 5 ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait, str(config.rgw_gc_obj_min_wait)) log.info('trying to restart services') srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') log.info('download the large object again to populate gc list with shadow entries') reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config) time.sleep(60) gc_list_output = json.loads(utils.exec_shell_cmd("radosgw-admin gc list --include-all")) log.info(gc_list_output) if gc_list_output: log.info("Shadow objects found after setting the rgw_gc_obj_min_wait to 5 seconds") utils.exec_shell_cmd("radosgw-admin gc process") log.info('Object download should not error out in 404 NoSuchKey error') reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config) reusable.remove_user(user_info)
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) rgw_service = RGWService() log.info( "adding indexless placement to placement target of default zonegroup") zonegroup_set = utils.exec_shell_cmd( 'radosgw-admin zonegroup placement add --rgw-zonegroup="default" --placement-id="indexless-placement"' ) log.info("adding indexless placement to placement pool of default zone") zone_set = utils.exec_shell_cmd( 'radosgw-admin zone placement add --rgw-zone="default" --placement-id="indexless-placement" --data-pool="default.rgw.buckets.data" --index-pool="default.rgw.buckets.index" --data_extra_pool="default.rgw.buckets.non-ec" --placement-index-type="indexless"' ) log.info("making indexless-placement as default") indexless_default = utils.exec_shell_cmd( 'radosgw-admin zonegroup placement default --placement-id="indexless-placement"' ) log.info("restart the rgw daemons") restart_service = rgw_service.restart() if restart_service is False: raise TestExecError("RGW service restart failed") log.info("sleep for 20 seconds after RGW service restart") time.sleep(20) # perform s3 operations all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) else: rgw_conn = auth.do_auth() if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc) log.info("creating bucket with name: %s" % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops["create_object"] is True: # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) log.info("upload type: normal") reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) # verify the bucket created has index_type = Indexless log.info("verify the bucket created has index_type as Indexless") bucket_stats = utils.exec_shell_cmd( "radosgw-admin bucket stats --bucket %s" % bucket_name_to_create) bucket_stats_json = json.loads(bucket_stats) bkt_index_type = bucket_stats_json["index_type"] if bkt_index_type == "Indexless": log.info(f"index_type is Indexless for bucket %s" % bucket_name_to_create) else: raise TestExecError(" index_type is not Indexless as expected") # delete bucket and objects if config.test_ops["delete_bucket"] is True: log.info("Deleting buckets and objects") reusable.delete_bucket(bucket) # reverting to default placement group log.info("revert changes to zone, zonegroup and default placement target") zone_set = utils.exec_shell_cmd( 'radosgw-admin zone placement rm --rgw-zone="default" --placement-id="indexless-placement" ' ) if "indexless" in zone_set: raise TestExecError( "Indexless placement present in zone even after revert") zonegroup_set = utils.exec_shell_cmd( 'radosgw-admin zonegroup placement rm --rgw-zonegroup="default" --placement-id="indexless-placement"' ) if "indexless" in zonegroup_set: 
raise TestExecError( "Indexless placement present in zonegroup even after revert") default_placement = utils.exec_shell_cmd( 'radosgw-admin zonegroup placement default --placement-id="default-placement"' ) log.info("restart the rgw daemons") restart_service = rgw_service.restart() if restart_service is False: raise TestExecError("RGW service restart failed") log.info("sleep for 20 seconds after RGW service restart") time.sleep(20) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
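# Illustrative sketch (not part of the test suite): the index_type verification
# used above, as a small helper around `radosgw-admin bucket stats`.
import json
import subprocess


def bucket_index_type(bucket_name):
    out = subprocess.check_output(
        ["radosgw-admin", "bucket", "stats", "--bucket", bucket_name], text=True
    )
    return json.loads(out)["index_type"]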
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_config_set = CephConfOp() rgw_service = RGWService() if config.sts is None: raise TestExecError("sts policies are missing in yaml config") # create users config.user_count = 2 users_info = s3lib.create_users(config.user_count) user1, user2 = users_info[0], users_info[1] log.info("adding sts config to ceph.conf") sesison_encryption_token = "abcdefghijklmnoq" ceph_config_set.set_to_ceph_conf( "global", ConfigOpts.rgw_sts_key, sesison_encryption_token ) ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") # Adding caps for user1 add_caps_cmd = ( 'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format( user_id=user1["user_id"] ) ) utils.exec_shell_cmd(add_caps_cmd) # user1 auth with iam_client auth = Auth(user1, ssl=config.ssl) iam_client = auth.do_auth_iam_client() # policy document policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "") policy_document = policy_document.replace("<user_name>", user2["user_id"]) print(policy_document) # role policy role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "") print(role_policy) role_name = f"S3RoleOf.{user1['user_id']}" log.info(f"role_name: {role_name}") # role creation happens here log.info("creating role") create_role_response = iam_client.create_role( AssumeRolePolicyDocument=policy_document, Path="/", RoleName=role_name, ) log.info("create_role_response") log.info(create_role_response) # Put role policy happening here policy_name = f"policy.{user1['user_id']}" log.info(f"policy_name: {policy_name}") log.info("putting role policy") put_policy_response = iam_client.put_role_policy( RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy ) log.info("put_policy_response") log.info(put_policy_response) # bucket creation operations now bucket_name = "testbucket" + user1["user_id"] # authenticating user1 for bucket creation operation auth = Auth(user1, ssl=config.ssl) user1_info = { "access_key": user1["access_key"], "secret_key": user1["secret_key"], "user_id": user1["user_id"], } s3_client_u1 = auth.do_auth() # bucket creation operation bucket = reusable.create_bucket(bucket_name, s3_client_u1, user1_info) # uploading objects to the bucket if config.test_ops["create_object"]: # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket_name, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, user1_info, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, user1_info, ) auth = Auth(user2, ssl=config.ssl) sts_client = auth.do_auth_sts_client() log.info("assuming role") assume_role_response = sts_client.assume_role( RoleArn=create_role_response["Role"]["Arn"], RoleSessionName=user1["user_id"], DurationSeconds=3600, ) log.info(assume_role_response) 
assumed_role_user_info = { "access_key": assume_role_response["Credentials"]["AccessKeyId"], "secret_key": assume_role_response["Credentials"]["SecretAccessKey"], "session_token": assume_role_response["Credentials"]["SessionToken"], "user_id": user2["user_id"], } log.info("got the credentials after assume role") s3client = Auth(assumed_role_user_info, ssl=config.ssl) s3_client = s3client.do_auth_using_client() io_info_initialize.initialize(basic_io_structure.initial()) write_user_info = AddUserInfo() basic_io_structure = BasicIOInfoStructure() user_info = basic_io_structure.user( **{ "user_id": assumed_role_user_info["user_id"], "access_key": assumed_role_user_info["access_key"], "secret_key": assumed_role_user_info["secret_key"], } ) write_user_info.add_user_info(user_info) unexisting_object = bucket_name + "_unexisting_object" try: response = s3_client.head_object(Bucket=bucket_name, Key=unexisting_object) except botocore.exceptions.ClientError as e: response_code = e.response["Error"]["Code"] log.info(response_code) if e.response["Error"]["Code"] == "404": log.info("404 Unexisting Object Not Found") elif e.response["Error"]["Code"] == "403": raise TestExecError("Error code : 403 - HeadObject operation: Forbidden")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_config_set = CephConfOp() rgw_service = RGWService() # create users config.user_count = 2 users_info = s3lib.create_users(config.user_count) # user1 is the owner user1, user2 = users_info[0], users_info[1] log.info("adding sts config to ceph.conf") sesison_encryption_token = "abcdefghijklmnoq" ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_sts_key, sesison_encryption_token) ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, True) srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") auth = Auth(user1) iam_client = auth.do_auth_iam_client() """ TODO: policy_document and role_policy can be used valid dict types. need to explore on this. """ policy_document = ( '{"Version":"2012-10-17",' '"Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/%s"]},' '"Action":["sts:AssumeRole"]}]}' % (user2["user_id"])) role_policy = ('{"Version":"2012-10-17",' '"Statement":{"Effect":"Allow",' '"Action":"s3:*",' '"Resource":"arn:aws:s3:::*"}}') add_caps_cmd = ( 'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'. format(user_id=user1["user_id"])) utils.exec_shell_cmd(add_caps_cmd) # log.info(policy_document) role_name = f"S3RoleOf.{user1['user_id']}" log.info(f"role_name: {role_name}") log.info("creating role") create_role_response = iam_client.create_role( AssumeRolePolicyDocument=policy_document, Path="/", RoleName=role_name, ) log.info("create_role_response") log.info(create_role_response) policy_name = f"policy.{user1['user_id']}" log.info(f"policy_name: {policy_name}") log.info("putting role policy") put_policy_response = iam_client.put_role_policy( RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy) log.info("put_policy_response") log.info(put_policy_response) auth = Auth(user2) sts_client = auth.do_auth_sts_client() log.info("assuming role") assume_role_response = sts_client.assume_role( RoleArn=create_role_response["Role"]["Arn"], RoleSessionName=user1["user_id"], DurationSeconds=3600, ) log.info(assume_role_response) assumed_role_user_info = { "access_key": assume_role_response["Credentials"]["AccessKeyId"], "secret_key": assume_role_response["Credentials"]["SecretAccessKey"], "session_token": assume_role_response["Credentials"]["SessionToken"], "user_id": user2["user_id"], } log.info("got the credentials after assume role") s3client = Auth(assumed_role_user_info) s3_client_rgw = s3client.do_auth() io_info_initialize.initialize(basic_io_structure.initial()) write_user_info = AddUserInfo() basic_io_structure = BasicIOInfoStructure() user_info = basic_io_structure.user( **{ "user_id": assumed_role_user_info["user_id"], "access_key": assumed_role_user_info["access_key"], "secret_key": assumed_role_user_info["secret_key"], }) write_user_info.add_user_info(user_info) if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( assumed_role_user_info["user_id"], rand_no=bc) log.info("creating bucket with name: %s" % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, s3_client_rgw, assumed_role_user_info) if config.test_ops["create_object"] is True: # uploading data 
log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, assumed_role_user_info, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, assumed_role_user_info, ) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authentication auth = Auth(each_user, ssl=config.ssl) s3_conn_client = auth.do_auth_using_client() # create buckets with object lock configuration if config.test_ops["create_bucket"] is True: log.info(f"no of buckets to create: {config.bucket_count}") for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc ) log.info(f"creating bucket with name: {bucket_name_to_create}") rgw_ip_and_port = get_rgw_ip_and_port() s3_conn_client.create_bucket( Bucket=bucket_name_to_create, ObjectLockEnabledForBucket=True ) # put object lock configuration for bucket s3_conn_client.put_object_lock_configuration( Bucket=bucket_name_to_create, ObjectLockConfiguration={ "ObjectLockEnabled": "Enabled", "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}, }, ) if config.test_ops["create_object"] is True: # uploading data log.info(f"s3 objects to create: {config.objects_count}") for oc, size in list(config.mapped_sizes.items()): s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, 0 ) log.info(f"s3 object name: {s3_object_name}") s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info(f"s3 object path: {s3_object_path}") log.info("upload type: normal") io_generator(TEST_DATA_PATH + "/" + s3_object_name, size) s3_conn_client.put_object( Body=TEST_DATA_PATH + "/" + s3_object_name, Bucket=bucket_name_to_create, Key=s3_object_name, ) log.info("Verify version count") # Verify version count versions = s3_conn_client.list_object_versions( Bucket=bucket_name_to_create ) versions_count = len(versions["Versions"]) error_message = ( f"Expected: {config.objects_count}, Actual: {versions_count}" ) if versions_count == config.objects_count: log.info("Expected and actual version count is same") else: raise ObjectVersionCountMismatch(error_message) # Verify delete disabled for object log.info("Verify delete disabled for object") for version_dict in versions["Versions"]: try: s3_conn_client.delete_object( Bucket=bucket_name_to_create, Key=s3_object_name, VersionId=version_dict["VersionId"], ) raise AccessDeniedObjectDeleted( "Access denied object deleted" ) except boto3exception.ClientError as e: expected_code = "AccessDenied" actual_code = e.response["Error"]["Code"] assert ( actual_code == expected_code ), "Expected: {expected_code}, Actual: {actual_code}" # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
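# --- illustrative sketch, not part of the original test ---
# The test above only proves that deletes are rejected; a complementary check
# (hypothetical helper, assuming the same boto3 client and the COMPLIANCE
# retention configured earlier) could read the retention back for each version:
def verify_compliance_retention(s3_conn_client, bucket_name, key, version_id):
    """Return True if the given object version carries a COMPLIANCE retention lock."""
    retention = s3_conn_client.get_object_retention(
        Bucket=bucket_name, Key=key, VersionId=version_id
    )
    return retention["Retention"]["Mode"] == "COMPLIANCE"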
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get("encryption_algorithm", None) is not None: log.info("encryption enabled, making ceph config changes") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) else: rgw_conn = auth.do_auth() objects_created_list = [] if config.test_ops["create_bucket"] is True: log.info("no of buckets to create: %s" % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc) log.info("creating bucket with name: %s" % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info) if config.test_ops["create_object"] is True: if config.test_ops["object_structure"] == "flat": # uploading data log.info("top level s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join( TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get( "upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) objects_created_list.append( (s3_object_name, s3_object_path)) # deleting the local file created after upload if config.local_file_delete is True: log.info( "deleting local file created after the upload" ) utils.exec_shell_cmd("rm -rf %s" % s3_object_path) # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72 if config.test_ops["object_structure"] == "pseudo": log.info( f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each" ) for count in range(config.pseudo_dir_count): s3_pseudo_dir_name = utils.gen_s3_object_name( bucket_name_to_create, count) s3_object_path = os.path.join( TEST_DATA_PATH, s3_pseudo_dir_name) manage_data.pseudo_dir_generator(s3_object_path) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_pseudo_object_name( s3_pseudo_dir_name, oc) log.info("s3 object name: %s" % s3_object_name) s3_object_path = os.path.join( TEST_DATA_PATH, s3_object_name) log.info("s3 object path: %s" % s3_object_path) if config.test_ops.get( "upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, 
TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) # deleting the local file created after upload if config.local_file_delete is True: log.info( "deleting local file created after the upload" ) utils.exec_shell_cmd("rm -rf %s" % s3_object_path) # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0 if config.test_ops["create_object"] is False: if config.test_ops[ "object_structure"] == "pseudo-dir-only": log.info( f"pseudo directories to create {config.pseudo_dir_count}" ) for count in range(config.pseudo_dir_count): s3_pseudo_dir_name = utils.gen_s3_object_name( bucket_name_to_create, count) utils.create_psuedo_dir(s3_pseudo_dir_name, bucket) # radoslist listing of the bucket if config.test_ops["radoslist"] is True: log.info( "executing the command radosgw-admin bucket radoslist " ) radoslist = utils.exec_shell_cmd( "radosgw-admin bucket radoslist --bucket %s" % bucket_name_to_create) if radoslist is False: raise TestExecError( "Radoslist command execution failed") # get the configuration parameter - rgw_bucket_index_max_aio ceph_version_id, ceph_version_name = utils.get_ceph_version() if ceph_version_name in ["luminous", "nautilus"]: cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep rgw_bucket_index_max_aio" max_aio_output = utils.exec_shell_cmd(cmd) max_aio = max_aio_output.split()[1] else: cmd = "ceph config get mon rgw_bucket_index_max_aio" max_aio_output = utils.exec_shell_cmd(cmd) max_aio = max_aio_output.rstrip("\n") # bucket stats to get the num_objects of the bucket bucket_stats = utils.exec_shell_cmd( "radosgw-admin bucket stats --bucket %s" % bucket_name_to_create) bucket_stats_json = json.loads(bucket_stats) bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][ "num_objects"] # ordered listing via radosgw-admin command and noting time taken log.info( "measure the execution time taken to list via radosgw-admin command" ) if config.test_ops["radosgw_listing_ordered"] is True: log.info("ordered listing via radosgw-admin command") rgw_cmd_time = reusable.time_to_list_via_radosgw( bucket_name_to_create, "ordered") if rgw_cmd_time > 0: rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time) rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60) log.info( f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins" ) else: raise TestExecError( "object listing via radosgw-admin command failed") # unordered listing via radosgw-admin command and noting time taken if config.test_ops["radosgw_listing_ordered"] is False: log.info("unordered listing via radosgw-admin command") rgw_time = reusable.time_to_list_via_radosgw( bucket_name_to_create, "unordered") if rgw_time > 0: rgw_time_secs = "{:.4f}".format(rgw_time) rgw_time_mins = "{:.4f}".format(rgw_time / 60) log.info( f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins" ) else: raise TestExecError( "object listing via radosgw-admin command failed") # listing via boto and noting the time taken log.info("measure the execution time taken to list via boto") boto_time = reusable.time_to_list_via_boto( bucket_name_to_create, rgw_conn) if boto_time > 0: boto_time_secs = "{:.4f}".format(boto_time) 
boto_time_mins = "{:.4f}".format(boto_time / 60) log.info( f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins" ) else: raise TestExecError("object listing via boto failed") # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265 if config.radoslist_all is True: log.info( "Executing the command radosgw-admin bucket radoslist on all buckets" ) cmd = "radosgw-admin bucket radoslist | grep ERROR" radoslist_all_error = utils.exec_shell_cmd(cmd) if radoslist_all_error: raise TestExecError("ERROR in radoslist command") if config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, each_user) else: reusable.delete_objects(bucket) time.sleep(30) reusable.delete_bucket(bucket) # check sync status if a multisite cluster reusable.check_sync_status() # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!") if config.user_remove is True: reusable.remove_user(each_user)
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() log.info("starting IO") config.user_count = 1 user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() log.info("sharding configuration will be added now.") if config.sharding_type == "dynamic": log.info("sharding type is dynamic") # for dynamic, # the number of shards should be greater than [ (no of objects)/(max objects per shard) ] # example: objects = 500 ; max object per shard = 10 # then no of shards should be at least 50 or more time.sleep(15) log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf( "global", ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard), ) ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_dynamic_resharding, "True") num_shards_expected = config.objects_count / config.max_objects_per_shard log.info("num_shards_expected: %s" % num_shards_expected) log.info("trying to restart services ") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") config.bucket_count = 1 objects_created_list = [] log.info("no of buckets to create: %s" % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=1) bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) if config.test_ops.get("enable_version", False): log.info("enable bucket version") reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) log.info("s3 objects to create: %s" % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) if config.test_ops.get("enable_version", False): reusable.upload_version_object( config, user_info, rgw_conn, s3_object_name, config.obj_size, bucket, TEST_DATA_PATH, ) else: reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) objects_created_list.append((s3_object_name, s3_object_path)) if config.sharding_type == "manual": log.info("sharding type is manual") # for manual. # the number of shards will be the value set in the command. 
time.sleep(15) log.info("in manual sharding") cmd_exec = utils.exec_shell_cmd( "radosgw-admin bucket reshard --bucket=%s --num-shards=%s " "--yes-i-really-mean-it" % (bucket.name, config.shards)) if cmd_exec is False: raise TestExecError("manual resharding command execution failed") sleep_time = 600 log.info(f"verification starts after waiting for {sleep_time} seconds") time.sleep(sleep_time) op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket %s" % bucket.name) json_doc = json.loads(op) num_shards_created = json_doc["num_shards"] log.info("no_of_shards_created: %s" % num_shards_created) if config.sharding_type == "manual": if config.shards != num_shards_created: raise TestExecError("expected number of shards not created") log.info("Expected number of shards created") if config.sharding_type == "dynamic": log.info("Verify if resharding list is empty") reshard_list_op = json.loads( utils.exec_shell_cmd("radosgw-admin reshard list")) if not reshard_list_op: log.info( "for dynamic number of shards created should be greater than or equal to number of expected shards" ) log.info("no_of_shards_expected: %s" % num_shards_expected) if int(num_shards_created) >= int(num_shards_expected): log.info("Expected number of shards created") else: raise TestExecError("Expected number of shards not created") if config.test_ops.get("delete_bucket_object", False): if config.test_ops.get("enable_version", False): for name, path in objects_created_list: reusable.delete_version_object(bucket, name, path, rgw_conn, user_info) else: reusable.delete_objects(bucket) reusable.delete_bucket(bucket) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
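# --- illustrative sketch, not part of the original test ---
# For dynamic resharding the lower bound on the expected shard count follows
# directly from the two values configured above; a minimal helper (hypothetical
# name) that mirrors the num_shards_expected calculation with integer rounding:
import math


def minimum_expected_shards(objects_count, max_objects_per_shard):
    """Lower bound on shards dynamic resharding should reach for the bucket."""
    return math.ceil(objects_count / max_objects_per_shard)


# e.g. 500 objects with rgw_max_objs_per_shard = 10 -> at least 50 shards expected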
def test_exec(config): test_info = AddTestInfo("storage_policy for %s" % config.rgw_client) io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) rgw_service = RGWService() try: # create pool pool_name = ".rgw.buckets.special" pg_num = "8" pgp_num = "8" pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % ( pool_name, pg_num, pgp_num, ) pool_create_exec = utils.exec_shell_cmd(pool_create) if pool_create_exec is False: raise TestExecError("Pool creation failed") # create realm realm_name = "buz-tickets" log.info("creating realm name") realm_create = ( "sudo radosgw-admin realm create --rgw-realm=%s --default" % realm_name ) realm_create_exec = utils.exec_shell_cmd(realm_create) if realm_create_exec is False: raise TestExecError("cmd execution failed") # sample output of create realm """ { "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62", "name": "buz-tickets", "current_period": "1950b710-3e63-4c41-a19e-46a715000980", "epoch": 1 } """ log.info("modify zonegroup ") modify = ( "sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master --default" % realm_name ) modify_exec = utils.exec_shell_cmd(modify) if modify_exec is False: raise TestExecError("cmd execution failed") # get the zonegroup zonegroup_file = "zonegroup.json" get_zonegroup = ( "sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s" % zonegroup_file ) get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup) if get_zonegroup_exec is False: raise TestExecError("cmd execution failed") add_to_placement_targets = {"name": "special-placement", "tags": []} fp = open(zonegroup_file, "r") zonegroup_txt = fp.read() fp.close() log.info("got zonegroup info: \n%s" % zonegroup_txt) zonegroup = json.loads(zonegroup_txt) log.info("adding placement targets") zonegroup["placement_targets"].append(add_to_placement_targets) with open(zonegroup_file, "w") as fp: json.dump(zonegroup, fp) zonegroup_set = "sudo radosgw-admin zonegroup set < %s" % zonegroup_file zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set) if zonegroup_set_exec is False: raise TestExecError("cmd execution failed") log.info("zone group update completed") log.info("getting zone file") # get zone log.info("getting zone info") zone_file = "zone.json" get_zone = "sudo radosgw-admin zone --rgw-zone=default get > zone.json" get_zone_exec = utils.exec_shell_cmd(get_zone) if get_zone_exec is False: raise TestExecError("cmd execution failed") fp = open(zone_file, "r") zone_info = fp.read() fp.close() log.info("zone_info :\n%s" % zone_info) zone_info_cleaned = json.loads(zone_info) special_placement_info = { "key": "special-placement", "val": { "index_pool": ".rgw.buckets.index", "data_pool": ".rgw.buckets.special", "data_extra_pool": ".rgw.buckets.extra", }, } log.info("adding special placement info") zone_info_cleaned["placement_pools"].append(special_placement_info) print(zone_info_cleaned) with open(zone_file, "w+") as fp: json.dump(zone_info_cleaned, fp) zone_file_set = "sudo radosgw-admin zone set < %s" % zone_file zone_file_set_exec = utils.exec_shell_cmd(zone_file_set) if zone_file_set_exec is False: raise TestExecError("cmd execution failed") log.info("zone info updated ") restarted = rgw_service.restart() if restarted is False: raise TestExecError("service restart failed") if config.rgw_client == "rgw": log.info("client type is rgw") rgw_user_info = s3_swift_lib.create_users(1) auth = Auth(rgw_user_info) rgw_conn = auth.do_auth() # create bucket 
            bucket_name = utils.gen_bucket_name_from_userid(rgw_user_info["user_id"], 0)
            bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
            # create object
            s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
            resuables.upload_object(
                s3_object_name, bucket, TEST_DATA_PATH, config, rgw_user_info
            )
        if config.rgw_client == "swift":
            log.info("client type is swift")
            user_names = ["tuffy", "scooby", "max"]
            tenant = "tenant"
            umgmt = UserMgmt()
            umgmt.create_tenant_user(
                tenant_name=tenant, user_id=user_names[0], displayname=user_names[0]
            )
            user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
            auth = Auth(user_info)
            rgw = auth.do_auth()
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=0
            )
            container = s3_swift_lib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed"
                )
            swift_object_name = utils.gen_s3_object_name(
                "%s.container.%s" % (user_names[0], 0), 0
            )
            log.info("object name: %s" % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info("object path: %s" % object_path)
            object_size = utils.get_file_size(
                config.objects_size_range["min"], config.objects_size_range["max"]
            )
            data_info = manage_data.io_generator(object_path, object_size)
            # upload object
            if data_info is False:
                raise TestExecError("data creation failed")
            log.info("uploading object: %s" % object_path)
            with open(object_path, "r") as fp:
                rgw.put_object(
                    container_name,
                    swift_object_name,
                    contents=fp.read(),
                    content_type="text/plain",
                )
        test_info.success_status("test passed")
        sys.exit(0)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) umgmt = UserMgmt() ceph_conf = CephConfOp() log.info(type(ceph_conf)) rgw_service = RGWService() # preparing data user_names = ["tuffy", "scooby", "max"] tenant = "tenant" tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant, user_id=user_names[0], displayname=user_names[0]) user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0]) auth = Auth(user_info) rgw = auth.do_auth() for cc in range(config.container_count): if config.version_enable is True: log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_swift_versioning_enabled, "True") log.info("trying to restart services ") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") container_name_old = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=str(cc) + "old") log.info(container_name_old) container = swiftlib.resource_op({ "obj": rgw, "resource": "put_container", "kwargs": dict(container=container_name_old), }) container_name = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=str(cc) + "new") log.info(container_name) container = swiftlib.resource_op({ "obj": rgw, "resource": "put_container", "args": [ container_name, { "X-Versions-Location": container_name_old }, ], }) if container is False: raise TestExecError( "Resource execution failed: container creation failed") ls = [] swift_object_name = "" for version_count in range(config.version_count): for oc, size in list(config.mapped_sizes.items()): swift_object_name = fill_container(rgw, container_name, user_names[0], oc, cc, size) ls = rgw.get_container(container_name_old) ls = list(ls) if config.copy_version_object is True: old_obj_name = ls[1][config.version_count - 2]["name"] log.info(old_obj_name) container = swiftlib.resource_op({ "obj": rgw, "resource": "copy_object", "kwargs": dict( container=container_name_old, obj=old_obj_name, destination=container_name + "/" + swift_object_name, ), }) if container is False: raise TestExecError("Resource execution failed") log.info("Successfully copied item") else: current_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format( uid=user_names[0], tenant=tenant, bucket=container_name) num_obj_current = utils.exec_shell_cmd(current_count) num_obj_current = json.loads(num_obj_current) num_obj_current = (num_obj_current[0].get("usage").get( "rgw.main").get("num_objects")) old_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format( uid=user_names[0], tenant=tenant, bucket=container_name_old) num_obj_old = utils.exec_shell_cmd(old_count) num_obj_old = json.loads(num_obj_old) num_obj_old = (num_obj_old[0].get("usage").get("rgw.main").get( "num_objects")) version_count_from_config = ( config.objects_count * config.version_count) - config.objects_count if (num_obj_current == config.objects_count) and ( num_obj_old == version_count_from_config): log.info("objects and versioned obbjects are correct") else: test_info.failed_status("test failed") elif config.object_expire is True: container_name = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=cc) container = swiftlib.resource_op({ "obj": rgw, "resource": "put_container", "args": [container_name] }) if container is False: raise 
TestExecError( "Resource execution failed: container creation failed") for oc, size in list(config.mapped_sizes.items()): swift_object_name = fill_container( rgw, container_name, user_names[0], oc, cc, size, header={"X-Delete-After": 5}, ) time.sleep(7) container_exists = swiftlib.resource_op({ "obj": rgw, "resource": "get_object", "args": [container_name, swift_object_name], }) log.info(container_exists) if container_exists: msg = "test failed as the objects are still present" test_info.failed_status(msg) raise TestExecError(msg) elif config.large_object_upload is True: container_name = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=cc) container = swiftlib.resource_op({ "obj": rgw, "resource": "put_container", "args": [container_name] }) if container is False: raise TestExecError( "Resource execution failed: container creation failed") for oc, size in list(config.mapped_sizes.items()): swift_object_name = fill_container( rgw, container_name, user_names[0], oc, cc, size, multipart=True, split_size=config.split_size, ) container_name_new = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=str(cc) + "New") container = swiftlib.resource_op({ "obj": rgw, "resource": "put_container", "kwargs": dict(container=container_name_new), }) if container is False: raise TestExecError( "Resource execution failed: container creation failed") container = swiftlib.resource_op({ "obj": rgw, "resource": "put_object", "kwargs": dict( container=container_name_new, obj=swift_object_name, contents=None, headers={ "X-Object-Manifest": container_name + "/" + swift_object_name + "/" }, ), }) if container is False: raise TestExecError( "Resource execution failed: container creation failed") if config.large_object_download is True: swift_old_object_path = os.path.join( TEST_DATA_PATH, swift_object_name) swift_object_download_fname = swift_object_name + ".download" log.info("download object name: %s" % swift_object_download_fname) swift_object_download_path = os.path.join( TEST_DATA_PATH, swift_object_download_fname) log.info("download object path: %s" % swift_object_download_path) swift_object_downloaded = rgw.get_object( container_name_new, swift_object_name) with open(swift_object_download_path, "wb") as fp: fp.write(swift_object_downloaded[1]) old_object = utils.get_md5(swift_old_object_path) downloaded_obj = utils.get_md5(swift_object_download_path) log.info("s3_object_downloaded_md5: %s" % old_object) log.info("s3_object_uploaded_md5: %s" % downloaded_obj) if str(old_object) == str(downloaded_obj): log.info("md5 match") utils.exec_shell_cmd("rm -rf %s" % swift_object_download_path) else: raise TestExecError("md5 mismatch") else: container_name = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=cc) container = swiftlib.resource_op({ "obj": rgw, "resource": "put_container", "args": [container_name] }) if container is False: raise TestExecError( "Resource execution failed: container creation failed") for oc, size in list(config.mapped_sizes.items()): swift_object_name = fill_container(rgw, container_name, user_names[0], oc, cc, size) # download object swift_object_download_fname = swift_object_name + ".download" log.info("download object name: %s" % swift_object_download_fname) swift_object_download_path = os.path.join( TEST_DATA_PATH, swift_object_download_fname) log.info("download object path: %s" % swift_object_download_path) swift_object_downloaded = rgw.get_object( container_name, swift_object_name) with open(swift_object_download_path, "w") as fp: 
fp.write(str(swift_object_downloaded[1])) # modify and re-upload log.info("appending new message to test_data") message_to_append = "adding new msg after download" fp = open(swift_object_download_path, "a+") fp.write(message_to_append) fp.close() with open(swift_object_download_path, "r") as fp: rgw.put_object( container_name, swift_object_name, contents=fp.read(), content_type="text/plain", ) # delete object log.info("deleting swift object") rgw.delete_object(container_name, swift_object_name) # delete container log.info("deleting swift container") rgw.delete_container(container_name) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!") reusable.remove_user(tenant_user_info, tenant=tenant)
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) rgw_service = RGWService() # create pool pool_name = '.rgw.buckets.special' pg_num = '8' pgp_num = '8' pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % ( pool_name, pg_num, pgp_num) pool_create_exec = utils.exec_shell_cmd(pool_create) if pool_create_exec is False: raise TestExecError("Pool creation failed") # create realm realm_name = 'buz-tickets' log.info('creating realm name') realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s' % realm_name realm_create_exec = utils.exec_shell_cmd(realm_create) if realm_create_exec is False: raise TestExecError("cmd execution failed") # sample output of create realm """ { "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62", "name": "buz-tickets", "current_period": "1950b710-3e63-4c41-a19e-46a715000980", "epoch": 1 } """ log.info('modify zonegroup ') modify = 'sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master' % realm_name modify_exec = utils.exec_shell_cmd(modify) if modify_exec is False: raise TestExecError("cmd execution failed") # get the zonegroup zonegroup_file = 'zonegroup.json' get_zonegroup = 'sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s' % zonegroup_file get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup) if get_zonegroup_exec is False: raise TestExecError("cmd execution failed") add_to_placement_targets = {"name": "special-placement", "tags": []} fp = open(zonegroup_file, 'r') zonegroup_txt = fp.read() fp.close() log.info('got zonegroup info: \n%s' % zonegroup_txt) zonegroup = json.loads(zonegroup_txt) log.info('adding placement targets') zonegroup['placement_targets'].append(add_to_placement_targets) with open(zonegroup_file, 'w') as fp: json.dump(zonegroup, fp) zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set) if zonegroup_set_exec is False: raise TestExecError("cmd execution failed") log.info('zone group update completed') log.info('getting zone file') # get zone log.info('getting zone info') zone_file = 'zone.json' get_zone = 'sudo radosgw-admin zone --rgw-zone=default get > zone.json' get_zone_exec = utils.exec_shell_cmd(get_zone) if get_zone_exec is False: raise TestExecError("cmd execution failed") fp = open(zone_file, 'r') zone_info = fp.read() fp.close() log.info('zone_info :\n%s' % zone_info) zone_info_cleaned = json.loads(zone_info) special_placement_info = { "key": "special-placement", "val": { "index_pool": ".rgw.buckets.index", "data_pool": ".rgw.buckets.special", "data_extra_pool": ".rgw.buckets.extra" } } log.info('adding special placement info') zone_info_cleaned['placement_pools'].append(special_placement_info) with open(zone_file, 'w+') as fp: json.dump(zone_info_cleaned, fp) zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file zone_file_set_exec = utils.exec_shell_cmd(zone_file_set) if zone_file_set_exec is False: raise TestExecError("cmd execution failed") log.info('zone info updated ') zone_group_update_set = 'radosgw-admin period update --commit' zone_group_update_set_exec = utils.exec_shell_cmd(zone_group_update_set) log.info(zone_group_update_set_exec) restarted = rgw_service.restart() if restarted is False: raise TestExecError("service restart failed") if config.rgw_client == 'rgw': log.info('client type is rgw') rgw_user_info = s3_swift_lib.create_users(1) auth = Auth(rgw_user_info) 
        rgw_conn = auth.do_auth()
        # create bucket
        bucket_name = utils.gen_bucket_name_from_userid(
            rgw_user_info['user_id'], 0)
        bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
        # create object
        s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                rgw_user_info)
    if config.rgw_client == 'swift':
        log.info('client type is swift')
        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'
        umgmt = UserMgmt()
        umgmt.create_tenant_user(tenant_name=tenant,
                                 user_id=user_names[0],
                                 displayname=user_names[0])
        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0])
        auth = Auth(user_info)
        rgw = auth.do_auth()
        container_name = utils.gen_bucket_name_from_userid(
            user_info['user_id'], rand_no=0)
        container = s3_swift_lib.resource_op({
            'obj': rgw,
            'resource': 'put_container',
            'args': [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation failed")
        swift_object_name = utils.gen_s3_object_name(
            '%s.container.%s' % (user_names[0], 0), 0)
        log.info('object name: %s' % swift_object_name)
        object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
        log.info('object path: %s' % object_path)
        object_size = utils.get_file_size(config.objects_size_range['min'],
                                          config.objects_size_range['max'])
        data_info = manage_data.io_generator(object_path, object_size)
        # upload object
        if data_info is False:
            raise TestExecError("data creation failed")
        log.info('uploading object: %s' % object_path)
        with open(object_path, 'r') as fp:
            rgw.put_object(container_name,
                           swift_object_name,
                           contents=fp.read(),
                           content_type='text/plain')
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)  # ,config=config)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        user_id = each_user["user_id"]
        # Creating a seed for multi-factor authentication
        log.info("Creating a seed for multi-factor authentication")
        cmd = "head -10 /dev/urandom | sha512sum | cut -b 1-30"
        SEED = utils.exec_shell_cmd(cmd)
        log.info(
            "Configure the one-time password generator oathtool and the back-end MFA system to use the same seed."
        )
        test_totp = reusable.generate_totp(SEED)
        serial = "MFAtest" + str(random.randrange(1, 100))
        if test_totp is False:
            raise TestExecError(
                "Failed to configure one-time password generator - oathtool")
        if config.test_ops["mfa_create"] is True:
            log.info("Create a new MFA TOTP token")
            cmd = f"time radosgw-admin mfa create --uid={user_id} --totp-serial={serial} --totp-seed={SEED}"
            mfa_create = utils.exec_shell_cmd(cmd)
            if mfa_create is False:
                raise TestExecError("Failed to create new MFA TOTP token!")
        # Verify no crash is seen with incorrect syntax for the mfa resync command BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1947862
        if config.test_ops.get("mfa_resync_invalid_syntax") is True:
            log.info(
                "Validate the mfa resync command errors out with appropriate message on invalid syntax"
            )
            get_totp = reusable.generate_totp(SEED)
            cmd = f"radosgw-admin mfa resync --uid {user_id} --totp-serial={serial} --totp-seed={SEED} --totp-pin={get_totp}"
            mfa_resync_invalid_syntax = utils.exec_shell_cmd(cmd)
            if mfa_resync_invalid_syntax is False:
                log.info("appropriate usage message displayed")
            else:
                raise TestExecError("Usage message not displayed")
        if config.test_ops["mfa_check"] is True:
            log.info(
                "Test a multi-factor authentication (MFA) time-based one time password (TOTP) token."
            )
            get_totp = reusable.generate_totp(SEED)
            cmd = f"time radosgw-admin mfa check --uid={user_id} --totp-serial={serial} --totp-pin={get_totp}"
            mfa_check = utils.exec_shell_cmd(cmd)
            if mfa_check is False:
                log.info(
                    "Resynchronize a multi-factor authentication TOTP token in case of time skew or failed checks."
                )
                previous_pin = reusable.generate_totp(SEED)
                log.info("Sleep of 30 seconds to fetch another totp")
                time.sleep(30)
                current_pin = reusable.generate_totp(SEED)
                cmd = (
                    f"time radosgw-admin mfa resync --uid {user_id} "
                    f"--totp-serial {serial} --totp-pin {previous_pin} "
                    f"--totp-pin {current_pin}")
                mfa_resync = utils.exec_shell_cmd(cmd)
                if mfa_resync is False:
                    raise TestExecError("Failed to resync token")
                log.info(
                    "Verify the token was successfully resynchronized by testing a new PIN"
                )
                get_totp = reusable.generate_totp(SEED)
                cmd = f"time radosgw-admin mfa check --uid {user_id} --totp-serial {serial} --totp-pin {get_totp}"
                mfa_check_resync = utils.exec_shell_cmd(cmd)
                if "ok" not in mfa_check_resync:
                    raise TestExecError("Failed to verify resync token")
        if config.test_ops["mfa_list"] is True:
            log.info("List MFA TOTP tokens")
            cmd = f"radosgw-admin mfa list --uid {user_id}"
            mfa_list = utils.exec_shell_cmd(cmd)
            if "MFAtest" in mfa_list:
                log.info("MFA token is listed for the given user")
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn,
                                                each_user)
                if config.test_ops.get("enable_mfa_version", False):
                    log.info("enable bucket versioning and MFA deletes")
                    token, status = reusable.enable_mfa_versioning(
                        bucket, rgw_conn, SEED, serial, each_user,
                        write_bucket_io_info)
                    if status is False:
                        log.info(
                            "trying again! AccessDenied could be a timing issue!"
                        )
                        new_token = reusable.generate_totp(SEED)
                        if token == new_token:
                            log.info(
                                "sleep of 30secs to generate another TOTP token"
                            )
                            time.sleep(30)
                        status = reusable.enable_mfa_versioning(
                            bucket,
                            rgw_conn,
                            SEED,
                            serial,
                            each_user,
                            write_bucket_io_info,
                        )
                        if status is False:
                            raise MFAVersionError(
                                "Failed to enable MFA and versioning on the bucket!"
) if config.test_ops["create_object"] is True: # uploading data log.info( f"top level s3 objects to create: {config.objects_count}" ) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info(f"s3 object name: {s3_object_name}") s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info(f"s3 object path: {s3_object_path}") if config.test_ops.get("upload_type") == "multipart": log.info("upload type: multipart") reusable.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) else: log.info("upload type: normal") reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user, ) objects_created_list.append( (s3_object_name, s3_object_path)) # deleting the local file created after upload if config.local_file_delete is True: log.info( "deleting local file created after the upload") cmd = f"rm -rf {s3_object_path}" utils.exec_shell_cmd(cmd) # bucket list to check the objects cmd = f"radosgw-admin bucket list --bucket {bucket_name_to_create}" bucket_list = utils.exec_shell_cmd(cmd) if config.test_ops["delete_mfa_object"] is True: for s3_object_name, path in objects_created_list: log.info( "Deleting an object configured with MFA should have TOTP token" ) versions = bucket.object_versions.filter( Prefix=s3_object_name) for version in versions: log.info( f"key_name: {version.object_key} --> version_id: {version.version_id}" ) log.info("Deleting the object with TOTP token") get_totp = reusable.generate_totp(SEED) cmd = f"radosgw-admin object rm --object {version.object_key} --object-version {version.version_id} --totp-pin {get_totp} --bucket {bucket_name_to_create}" object_delete = utils.exec_shell_cmd(cmd) if object_delete is False: raise TestExecError( "Object deletion with MFA token failed!") log.info("Verify object is deleted permanently") cmd = f"radosgw-admin bucket list --bucket {bucket_name_to_create}" bucket_list = utils.exec_shell_cmd(cmd) if version.version_id in bucket_list: raise TestExecError( "Object version is still present, Failed to delete the object" ) if config.test_ops["delete_bucket"] is True: log.info(f"Deleting the bucket {bucket_name_to_create}") time.sleep(10) reusable.delete_bucket(bucket) if config.test_ops["remove_mfa"] is True: log.info( "Delete a multi-factor authentication (MFA) time-based one time password (TOTP) token" ) cmd = f"radosgw-admin mfa remove --uid {user_id} --totp-serial {serial}" mfa_remove = utils.exec_shell_cmd(cmd) if mfa_remove is False: raise TestExecError("MFA delete failed") log.info("Verify the MFA token is deleted") cmd = ( f"radosgw-admin mfa get --uid {user_id} --totp-serial {serial}" ) mfa_get = utils.exec_shell_cmd(cmd) if mfa_get is False: log.info("MFA token successfully deleted for user") else: raise TestExecError("MFA token delete for user failed") # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
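# --- illustrative sketch, an assumption and not the project's reusable.generate_totp ---
# The --totp-seed used above is a hex string, so a matching PIN could, for
# example, be produced with the oathtool CLI; the exact flags below are
# illustrative and should be checked against the installed oathtool version.
def generate_totp_pin(seed_hex):
    """Generate a 6-digit TOTP PIN from a hex seed using oathtool."""
    return utils.exec_shell_cmd(f"oathtool -d6 --totp {seed_hex}").strip()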
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() config.rgw_lc_debug_interval = 30 config.rgw_lc_max_worker = 10 log.info("making changes to ceph.conf") ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval)) _, version_name = utils.get_ceph_version() if "nautilus" in version_name: ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_max_worker, str(config.rgw_lc_max_worker)) else: ceph_conf.set_to_ceph_conf( section=None, option=ConfigOpts.rgw_lc_max_worker, value=str(config.rgw_lc_max_worker), ) ceph_conf.set_to_ceph_conf(section=None, option=ConfigOpts.rgw_lc_debug_interval, value="30") log.info("trying to restart services") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info("RGW service restarted") config.user_count = 1 config.bucket_count = 1 # create user user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info, ssl=config.ssl) rgw_conn = auth.do_auth() rgw_conn2 = auth.do_auth_using_client() log.info("no of buckets to create: %s" % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=1) obj_list = [] obj_tag = "suffix1=WMV1" bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info) prefix = list( map( lambda x: x, [ rule["Filter"].get("Prefix") or rule["Filter"]["And"].get("Prefix") for rule in config.lifecycle_conf ], )) prefix = prefix if prefix else ["dummy1"] if config.test_ops["enable_versioning"] is True: reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info) if config.test_ops["create_object"] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + "." + bucket.name + "." + str(oc) obj_list.append(s3_object_name) if config.test_ops["version_count"] > 0: for vc in range(config.test_ops["version_count"]): log.info("version count for %s is %s" % (s3_object_name, str(vc))) log.info("modifying data: %s" % s3_object_name) reusable.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, user_info, append_data=True, append_msg="hello object for version: %s\n" % str(vc), ) else: log.info("s3 objects to create: %s" % config.objects_count) reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config) lc_ops.validate_prefix_rule(bucket, config) if config.test_ops["delete_marker"] is True: life_cycle_rule_new = {"Rules": config.delete_marker_ops} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config) if config.test_ops["enable_versioning"] is False: if config.test_ops["create_object"] is True: for oc, size in list(config.mapped_sizes.items()): config.obj_size = size key = prefix.pop() prefix.insert(0, key) s3_object_name = key + "." + bucket.name + "." 
+ str(oc) obj_list.append(s3_object_name) reusable.upload_object_with_tagging(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, obj_tag) life_cycle_rule = {"Rules": config.lifecycle_conf} reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config) lc_ops.validate_and_rule(bucket, config) reusable.remove_user(user_info) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
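# --- illustrative sketch, an assumed shape for config.lifecycle_conf (not taken from the original yaml) ---
# The test above wraps config.lifecycle_conf in {"Rules": ...} before applying it,
# so each entry is expected to be a standard S3 lifecycle rule with a Prefix
# (or And) filter, roughly along these lines:
sample_lifecycle_conf = [
    {
        "ID": "LC_Rule_1",
        "Filter": {"Prefix": "key1"},
        "Status": "Enabled",
        "Expiration": {"Days": 1},
    }
]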
def test_exec(config): test_info = AddTestInfo('RGW Dynamic Resharding test') io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() try: test_info.started_info() log.info('starting IO') config.max_objects_per_shard = 10 config.no_of_shards = 10 config.user_count = 1 user_info = s3lib.create_users(config.user_count) user_info = user_info[0] auth = Auth(user_info) rgw_conn = auth.do_auth() config.bucket_count = 1 log.info('no of buckets to create: %s' % config.bucket_count) bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1) bucket = create_bucket_with_versioning(rgw_conn, user_info, bucket_name) upload_objects(user_info, bucket, config) log.info('sharding configuration will be added now.') if config.sharding_type == 'online': log.info('sharding type is online') # for online, # the number of shards should be greater than [ (no of objects)/(max objects per shard) ] # example: objects = 500 ; max object per shard = 10 # then no of shards should be at least 50 or more time.sleep(15) log.info('making changes to ceph.conf') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, config.max_objects_per_shard) ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, True) num_shards_expected = config.objects_count / config.max_objects_per_shard log.info('num_shards_expected: %s' % num_shards_expected) log.info('trying to restart services ') srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') if config.sharding_type == 'offline': log.info('sharding type is offline') # for offline. # the number of shards will be the value set in the command. 
            time.sleep(15)
            log.info('in offline sharding')
            cmd_exec = utils.exec_shell_cmd(
                'radosgw-admin bucket reshard --bucket=%s --num-shards=%s' %
                (bucket.name, config.no_of_shards))
            if cmd_exec is False:
                raise TestExecError(
                    "offline resharding command execution failed")
        # upload_objects(user_info, bucket, config)
        log.info('s3 objects to create: %s' % config.objects_count)
        for oc in range(config.objects_count):
            s3_object_name = utils.gen_s3_object_name(
                bucket.name, config.objects_count + oc)
            resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                    config, user_info)
        time.sleep(300)
        log.info('verification starts')
        op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" %
                                  bucket.name)
        json_doc = json.loads(op)
        bucket_id = json_doc['data']['bucket']['bucket_id']
        op2 = utils.exec_shell_cmd(
            "radosgw-admin metadata get bucket.instance:%s:%s" %
            (bucket.name, bucket_id))
        json_doc2 = json.loads(op2)
        num_shards_created = json_doc2['data']['bucket_info']['num_shards']
        log.info('no_of_shards_created: %s' % num_shards_created)
        if config.sharding_type == 'offline':
            # for offline resharding the expected count is the value passed to the reshard command
            if int(num_shards_created) != int(config.no_of_shards):
                raise TestExecError("expected number of shards not created")
            log.info('Expected number of shards created')
        if config.sharding_type == 'online':
            log.info('no_of_shards_expected: %s' % num_shards_expected)
            log.info(
                'for online, '
                'number of shards created should be greater than or equal to number of expected shards'
            )
            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')
            else:
                raise TestExecError('Expected number of shards not created')
        read_io = ReadIOInfo()
        read_io.yaml_fname = 'io_info.yaml'
        read_io.verify_io()
        test_info.success_status('test passed')
        sys.exit(0)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config): """ Executes test based on configuration passed Args: config(object): Test configuration """ io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) umgmt = UserMgmt() ceph_conf = CephConfOp() rgw_service = RGWService() # preparing data user_names = ["tom", "ram", "sam"] tenant = "tenant" tenant_user_info = umgmt.create_tenant_user( tenant_name=tenant, user_id=user_names[1], displayname=user_names[1] ) user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[1]) auth = Auth(user_info, config.ssl) rgw = auth.do_auth() container_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=0) container = swiftlib.resource_op( {"obj": rgw, "resource": "put_container", "args": [container_name]} ) if container is False: raise TestExecError("Resource execution failed: container creation faield") for oc, size in list(config.mapped_sizes.items()): # upload objects to the container swift_object_name = fill_container( rgw, container_name, user_names[1], oc, 0, size ) # delete all uploaded objects log.info("deleting all swift objects") auth_response = rgw.get_auth() token = auth_response[1] # test.txt file should contain container_name with open("test.txt", "w") as f: f.write(container_name) ip_and_port = rgw.authurl.split("/")[2] proto = "https" if config.ssl else "http" url = f"{proto}://{ip_and_port}/swift/v1/?bulk-delete" test_file = open("test.txt", "r") headers = { "Accept": "application/json", "Content-Type": "text/plain", "X-Auth-Token": token, } response = requests.delete( url, headers=headers, verify=False, files={"form_field_name": test_file} ) if response.status_code == 200: log.info("Bulk delete succeeded") else: raise TestExecError( "Bulk delete failed with status code: %d" % response.status_code ) # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!")
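# --- illustrative sketch, an assumption about the Swift ?bulk-delete payload ---
# The request body sent above is plain text with one path per line; individual
# objects are addressed as "<container>/<object>" while a bare container name
# (as written into test.txt by the test) targets the container itself.
# A hypothetical helper for building such a payload:
def build_bulk_delete_payload(container, objects=()):
    """Return a newline-separated bulk-delete body for the given container and objects."""
    lines = [f"{container}/{obj}" for obj in objects]
    lines.append(container)
    return "\n".join(lines)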
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    test_info = AddTestInfo("create m buckets")
    conf_path = "/etc/ceph/%s.conf" % config.cluster_name
    ceph_conf = CephConfOp(conf_path)
    rgw_service = RGWService()
    try:
        test_info.started_info()
        # get user
        with open("user_details") as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            user_info = basic_io_structure.user(
                **{
                    "user_id": each_user["user_id"],
                    "access_key": each_user["access_key"],
                    "secret_key": each_user["secret_key"],
                })
            write_user_info.add_user_info(user_info)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            # enabling sharding
            if config.test_ops["sharding"]["enable"] is True:
                log.info("enabling sharding on buckets")
                max_shards = config.test_ops["sharding"]["max_shards"]
                log.info("making changes to ceph.conf")
                ceph_conf.set_to_ceph_conf(
                    "global",
                    ConfigOpts.rgw_override_bucket_index_max_shards,
                    max_shards,
                )
                log.info("trying to restart services")
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info("RGW service restarted")
            # create buckets
            if config.test_ops["create_bucket"] is True:
                log.info("no of buckets to create: %s" % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name_to_create = utils.gen_bucket_name_from_userid(
                        each_user["user_id"], rand_no=bc)
                    log.info("creating bucket with name: %s" % bucket_name_to_create)
                    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                    bucket = s3lib.resource_op({
                        "obj": rgw_conn,
                        "resource": "Bucket",
                        "args": [bucket_name_to_create],
                    })
                    created = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "create",
                        "args": None,
                        "extra_info": {
                            "access_key": each_user["access_key"]
                        },
                    })
                    if created is False:
                        raise TestExecError(
                            "Resource execution failed: bucket creation failed"
                        )
                    if created is not None:
                        response = HttpResponseParser(created)
                        if response.status_code == 200:
                            log.info("bucket created")
                        else:
                            raise TestExecError("bucket creation failed")
                    else:
                        raise TestExecError("bucket creation failed")
                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s --cluster %s | grep bucket_id"
                            % (bucket.name, config.cluster_name))
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (out.replace(
                            '"', "").strip().split(":")[1].strip().replace(",", ""))
                        cmd2 = (
                            "rados -p default.rgw.buckets.index ls --cluster %s | grep %s"
                            % (config.cluster_name, b_id))
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharding verification")
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
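# --- Hedged example (not part of the test suite) ------------------------------
# The sharding check above greps the index pool for the bucket_id; a slightly
# more explicit sketch is to count the `.dir.<bucket_id>*` index objects in the
# pool and compare the count against the configured
# rgw_override_bucket_index_max_shards. The pool name
# `default.rgw.buckets.index` matches the test; adjust it for other zones.
import subprocess


def count_index_shard_objects(bucket_id, pool="default.rgw.buckets.index"):
    """Count bucket index objects (.dir.<bucket_id>[.N]) in the index pool."""
    out = subprocess.check_output(["rados", "-p", pool, "ls"]).decode()
    prefix = ".dir." + bucket_id
    return sum(1 for line in out.splitlines() if line.startswith(prefix))


# usage sketch:
#   shards = count_index_shard_objects(b_id)
#   assert shards == max_shards, f"expected {max_shards} index shards, found {shards}"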
from v2.lib.exceptions import MFAVersionError, TestExecError from v2.lib.rgw_config_opts import ConfigOpts from v2.lib.s3.write_io_info import ( BasicIOInfoStructure, BucketIoInfo, IOInfoInitialize, KeyIoInfo, ) from v2.lib.sync_status import sync_status from v2.utils.utils import HttpResponseParser, RGWService io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() write_key_io_info = KeyIoInfo() rgw_service = RGWService() log = logging.getLogger() def create_bucket(bucket_name, rgw, user_info): log.info("creating bucket with name: %s" % bucket_name) # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create) bucket = s3lib.resource_op({ "obj": rgw, "resource": "Bucket", "args": [bucket_name] }) created = s3lib.resource_op({ "obj": bucket, "resource": "create",
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get('encryption_algorithm', None) is not None: log.info('encryption enabled, making ceph config changes') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{'signature_version': 's3v4'}) else: rgw_conn = auth.do_auth() # enabling sharding if config.test_ops['sharding']['enable'] is True: log.info('enabling sharding on buckets') max_shards = config.test_ops['sharding']['max_shards'] log.info('making changes to ceph.conf') ceph_conf.set_to_ceph_conf( 'global', ConfigOpts.rgw_override_bucket_index_max_shards, str(max_shards)) log.info('trying to restart services ') srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') if config.test_ops['compression']['enable'] is True: compression_type = config.test_ops['compression']['type'] log.info('enabling compression') cmd = 'radosgw-admin zone get' out = utils.exec_shell_cmd(cmd) zone = json.loads(out) zone = zone.get("name") cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \ '--placement-id=default-placement --compression=%s' % (zone,compression_type) out = utils.exec_shell_cmd(cmd) try: data = json.loads(out) if data['placement_pools'][0]['val']['storage_classes'][ 'STANDARD']['compression_type'] == compression_type: log.info('Compression enabled successfully') else: raise ValueError('failed to enable compression') except ValueError as e: exit(str(e)) log.info('trying to restart rgw services ') srv_restarted = rgw_service.restart() time.sleep(10) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') # create buckets if config.test_ops['create_bucket'] is True: log.info('no of buckets to create: %s' % config.bucket_count) for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user['user_id'], rand_no=bc) log.info('creating bucket with name: %s' % bucket_name_to_create) bucket = resuables.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops['create_object'] is True: # uploading data log.info('s3 objects to create: %s' % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) log.info('s3 object name: %s' % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info('s3 object path: %s' % s3_object_path) if config.test_ops.get('upload_type') == 'multipart': log.info('upload type: multipart') resuables.upload_mutipart_object( s3_object_name, bucket, TEST_DATA_PATH, config, each_user) else: log.info('upload type: normal') resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) if config.test_ops['download_object'] is True: log.info('trying to download object: %s' % s3_object_name) 
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' %
                                     s3_object_download_path)
                            log.info('downloading to filename: %s' %
                                     s3_object_download_name)
                            if config.test_ops.get('encryption_algorithm',
                                                   None) is not None:
                                log.info('encryption download')
                                log.info('encryption algorithm: %s' %
                                         config.test_ops['encryption_algorithm'])
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        'SSECustomerKey': encryption_key,
                                        'SSECustomerAlgorithm':
                                        config.test_ops['encryption_algorithm']
                                    })
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    'obj': bucket,
                                    'resource': 'download_file',
                                    'args': [s3_object_name, s3_object_download_path],
                                })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
                            log.info('s3_object_downloaded_md5: %s' %
                                     s3_object_downloaded_md5)
                            log.info('s3_object_uploaded_md5: %s' %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info('md5 match')
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError('md5 mismatch')
                        if config.local_file_delete is True:
                            log.info('deleting local file created after the upload')
                            utils.exec_shell_cmd('rm -rf %s' % s3_object_path)
                # verification of shards after upload
                if config.test_ops['sharding']['enable'] is True:
                    cmd = 'radosgw-admin metadata get bucket:%s | grep bucket_id' % bucket.name
                    out = utils.exec_shell_cmd(cmd)
                    b_id = out.replace('"', '').strip().split(":")[1].strip().replace(',', '')
                    cmd2 = 'rados -p default.rgw.buckets.index ls | grep %s' % b_id
                    out = utils.exec_shell_cmd(cmd2)
                    log.info('got output from sharding verification')
                # print out bucket stats and verify in logs for compressed data by
                # comparing size_kb_utilized and size_kb_actual
                if config.test_ops['compression']['enable'] is True:
                    cmd = 'radosgw-admin bucket stats --bucket=%s' % bucket.name
                    out = utils.exec_shell_cmd(cmd)
                if config.test_ops['delete_bucket_object'] is True:
                    log.info('listing all objects in bucket: %s' % bucket.name)
                    objects = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'objects',
                        'args': None
                    })
                    log.info('objects :%s' % objects)
                    all_objects = s3lib.resource_op({
                        'obj': objects,
                        'resource': 'all',
                        'args': None
                    })
                    log.info('all objects: %s' % all_objects)
                    for obj in all_objects:
                        log.info('object_name: %s' % obj.key)
                    log.info('deleting all objects in bucket')
                    objects_deleted = s3lib.resource_op({
                        'obj': objects,
                        'resource': 'delete',
                        'args': None
                    })
                    log.info('objects_deleted: %s' % objects_deleted)
                    if objects_deleted is False:
                        raise TestExecError(
                            'Resource execution failed: Object deletion failed')
                    if objects_deleted is not None:
                        response = HttpResponseParser(objects_deleted[0])
                        if response.status_code == 200:
                            log.info('objects deleted')
                        else:
                            raise TestExecError("objects deletion failed")
                    else:
                        raise TestExecError("objects deletion failed")
                    log.info('deleting bucket: %s' % bucket.name)
                    # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                    bucket_deleted_status = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'delete',
                        'args': None
                    })
                    log.info('bucket_deleted_status: %s' % bucket_deleted_status)
                    if bucket_deleted_status is not None:
                        response = HttpResponseParser(bucket_deleted_status)
                        if response.status_code == 204:
                            log.info('bucket deleted')
                        else:
                            raise TestExecError("bucket deletion failed")
                    else:
                        raise TestExecError("bucket deletion failed")
        # disable compression after test
        if config.test_ops['compression']['enable'] is True:
            log.info('disable compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=none' % zone
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
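# --- Hedged example (not part of the test suite) ------------------------------
# The comments above say the bucket stats are printed "for compressed data by
# comparing size_kb_utilized and size_kb_actual", but the test only prints the
# stats. This is a minimal sketch of that comparison. It assumes the usual
# `usage -> rgw.main` layout of `radosgw-admin bucket stats` output; field
# names may differ slightly across releases.
import json
import subprocess


def compression_effective(bucket_name):
    """Return True if the bucket's utilized size is below its logical size."""
    out = subprocess.check_output(
        ["radosgw-admin", "bucket", "stats", "--bucket=%s" % bucket_name])
    usage = json.loads(out).get("usage", {}).get("rgw.main", {})
    actual = usage.get("size_kb_actual", 0)
    utilized = usage.get("size_kb_utilized", 0)
    print("size_kb_actual=%s size_kb_utilized=%s" % (actual, utilized))
    return utilized < actual


# usage sketch:
#   assert compression_effective(bucket.name), "compression did not reduce utilized size"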
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    test_info = AddTestInfo('create m buckets')
    conf_path = '/etc/ceph/%s.conf' % config.cluster_name
    ceph_conf = CephConfOp(conf_path)
    rgw_service = RGWService()
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            user_info = basic_io_structure.user(
                **{
                    'user_id': each_user['user_id'],
                    'access_key': each_user['access_key'],
                    'secret_key': each_user['secret_key']
                })
            write_user_info.add_user_info(user_info)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            # enabling sharding
            if config.test_ops['sharding']['enable'] is True:
                log.info('enabling sharding on buckets')
                max_shards = config.test_ops['sharding']['max_shards']
                log.info('making changes to ceph.conf')
                ceph_conf.set_to_ceph_conf(
                    'global', ConfigOpts.rgw_override_bucket_index_max_shards,
                    max_shards)
                log.info('trying to restart services')
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')
            # create buckets
            if config.test_ops['create_bucket'] is True:
                log.info('no of buckets to create: %s' % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name_to_create = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    log.info('creating bucket with name: %s' % bucket_name_to_create)
                    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                    bucket = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Bucket',
                        'args': [bucket_name_to_create]
                    })
                    created = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'create',
                        'args': None,
                        'extra_info': {
                            'access_key': each_user['access_key']
                        }
                    })
                    if created is False:
                        raise TestExecError(
                            "Resource execution failed: bucket creation failed")
                    if created is not None:
                        response = HttpResponseParser(created)
                        if response.status_code == 200:
                            log.info('bucket created')
                        else:
                            raise TestExecError("bucket creation failed")
                    else:
                        raise TestExecError("bucket creation failed")
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s --cluster %s | grep bucket_id' \
                              % (bucket.name, config.cluster_name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace(
                            '"', '').strip().split(":")[1].strip().replace(',', '')
                        cmd2 = 'rados -p default.rgw.buckets.index ls --cluster %s | grep %s' \
                               % (config.cluster_name, b_id)
                        out = utils.exec_shell_cmd(cmd2)
                        log.info('got output from sharding verification')
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
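# --- Hedged example (not part of the test suite) ------------------------------
# The tests above set rgw_override_bucket_index_max_shards by editing ceph.conf
# and restarting RGW. On clusters that use the centralized MON config store, a
# possible alternative (not what these tests do) is `ceph config set` /
# `ceph config get`; the `client.rgw` target shown here is an assumption and
# may need to match the actual RGW daemon naming in your deployment.
import subprocess


def set_rgw_option_centralized(option, value, who="client.rgw"):
    """Set an RGW option in the MON config store and read it back."""
    subprocess.check_call(["ceph", "config", "set", who, option, str(value)])
    out = subprocess.check_output(["ceph", "config", "get", who, option])
    return out.decode().strip()


# usage sketch:
#   set_rgw_option_centralized("rgw_override_bucket_index_max_shards", 8)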
def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() write_bucket_io_info = BucketIoInfo() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() objects_created_list = [] # create user all_users_info = s3lib.create_users(config.user_count) if config.test_ops.get('encryption_algorithm', None) is not None: log.info('encryption enabled, making ceph config changes') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false") srv_restarted = rgw_service.restart() time.sleep(30) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') # making changes to max_objects_per_shard and rgw_gc_obj_min_wait to ceph.conf log.info('making changes to ceph.conf') log.info(f'rgw_max_objs_per_shard parameter set to {str(config.max_objects_per_shard)}') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard)) ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, 'True') log.info(f'rgw gc obj min wait configuration parameter set to {str(config.rgw_gc_obj_min_wait)}') ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,str(config.rgw_gc_obj_min_wait)) sleep_time = 10 log.info(f'Restarting RGW service and waiting for {sleep_time} seconds') srv_restarted = rgw_service.restart() time.sleep(sleep_time) if srv_restarted is False: raise TestExecError("RGW service restart failed") else: log.info('RGW service restarted') for each_user in all_users_info: # authenticate auth = Auth(each_user, ssl=config.ssl) if config.use_aws4 is True: rgw_conn = auth.do_auth(**{'signature_version': 's3v4'}) else: rgw_conn = auth.do_auth() objects_created_list = [] if config.test_ops['create_bucket'] is True: log.info('no of buckets to create: %s' % config.bucket_count) for bc in range(config.bucket_count): log.info(f'creating {str(bc)} bucket') bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc) log.info('creating bucket with name: %s' % bucket_name_to_create) bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user) if config.test_ops.get('enable_version', False): log.info('enable bucket version') reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info) if config.test_ops['create_object'] is True: log.info('s3 objects to create: %s' % config.objects_count) for oc, size in list(config.mapped_sizes.items()): config.obj_size = size s3_object_name = utils.gen_s3_object_name(bucket.name, oc) log.info('s3 object name: %s' % s3_object_name) s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info('s3 object path: %s' % s3_object_path) if config.test_ops.get('enable_version', False): log.info('upload versioned objects') reusable.upload_version_object(config, each_user, rgw_conn, s3_object_name, config.obj_size, bucket, TEST_DATA_PATH) else: log.info('upload type: normal') reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user) objects_created_list.append((s3_object_name, s3_object_path)) #deleting the local file created after upload if config.local_file_delete is True: log.info('deleting local file created after the upload') utils.exec_shell_cmd('rm -rf %s' % s3_object_path) # listing the objects if config.test_ops.get('list_objects', False): if config.test_ops.get('enable_version', False): for name,path in objects_created_list: 
                            reusable.list_versioned_objects(bucket, name, path, rgw_conn)
                    else:
                        reusable.list_objects(bucket)
                if config.test_ops.get('delete_bucket_object', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            log.info('name: %s, path: %s' % (name, path))
                            versions = bucket.object_versions.filter(Prefix=name)
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({
                                'obj': rgw_conn,
                                'resource': 'Object',
                                'args': [bucket.name, name]
                            })
                            log.info('deleting versions for s3 obj: %s' % name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    'obj': s3_obj,
                                    'resource': 'delete',
                                    'kwargs': dict(VersionId=version.version_id)
                                })
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted')
                                        reusable.delete_version_object(
                                            bucket, version.version_id, path,
                                            rgw_conn, each_user)
                                    else:
                                        raise TestExecError("version deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                    else:
                        reusable.delete_objects(bucket)
                    log.info(f'deleting the bucket {bucket_name_to_create}')
                    reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    # remove the user
    reusable.remove_user(each_user)
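# --- Hedged example (not part of the test suite) ------------------------------
# A plain-boto3 sketch of the versioned-delete loop above, without the
# s3lib.resource_op wrapper: iterate the versions of a key and delete each one
# by VersionId. `s3` is assumed to be a boto3 S3 resource authenticated against
# the RGW endpoint (what auth.do_auth() returns in these tests).
def delete_all_versions(s3, bucket_name, key):
    """Delete every version (and delete marker) of `key` in a versioned bucket."""
    bucket = s3.Bucket(bucket_name)
    for version in bucket.object_versions.filter(Prefix=key):
        if version.object_key != key:
            continue  # Prefix match may include other keys
        response = version.delete()  # same as Object.delete(VersionId=...)
        status = response["ResponseMetadata"]["HTTPStatusCode"]
        assert status == 204, f"unexpected status {status} for {version.version_id}"


# usage sketch:
#   delete_all_versions(rgw_conn, bucket.name, s3_object_name)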