def upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info):
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                         config.objects_size_range['max'])
    data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict({'access_key': user_info['access_key']}, **data_info)
    object_uploaded_status = s3lib.resource_op({
        'obj': bucket,
        'resource': 'upload_file',
        'args': [s3_object_path, s3_object_name],
        'extra_info': upload_info
    })
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info('object uploaded')
def test_acls_public_read(u1_rgw_conn, u1, u2_rgw_conn, u1_bucket, u2_bucket):
    # test for public_read
    s3_ops = ResourceOps()
    u1_bucket_acl = s3_ops.resource_op(u1_rgw_conn, 'BucketAcl', u1_bucket.name)
    log.info('setting bucket acl: %s' % ACLS[1])
    u1_bucket_acl.put(ACL=ACLS[1])
    # access bucket_info of u1_bucket from u2
    log.info('u1 bucket info')
    u1_bucket_info = s3_ops.resource_op(u1_rgw_conn, 'Bucket', u1_bucket.name)
    log.info(u1_bucket_info.name)
    log.info(u1_bucket_info.creation_date)
    log.info(u1_bucket_info.load())
    s3_object_name = utils.gen_s3_object_name(u1_bucket.name, rand_no=0)
    log.info('s3 object name: %s' % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info('s3 object path: %s' % s3_object_path)
    s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                         config.objects_size_range['max'])
    data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info('uploading s3 object: %s' % s3_object_path)
    upload_info = dict({'access_key': u1['access_key']}, **data_info)
    object_uploaded_status = s3_ops.resource_op(u1_bucket, 'upload_file',
                                                s3_object_path, s3_object_name,
                                                **upload_info)
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info('object uploaded')
    log.info('trying to access u1 bucket and its objects info from u2 '
             'after setting u1 bucket acls to public read')
    access_u1_bucket_from_u2 = s3_ops.resource_op(u2_rgw_conn, 'Bucket',
                                                  u1_bucket.name)
    try:
        all_objects = access_u1_bucket_from_u2.objects.all()
        for obj in all_objects:
            log.info('obj name: %s' % obj.key)
    except Exception as e:
        msg = 'access given to read, but still failing to read'
        raise TestExecError(msg)
    log.info('trying to delete u1_bucket from u2')
    try:
        u1_bucket_deleted_response = access_u1_bucket_from_u2.delete()
        response = HttpResponseParser(u1_bucket_deleted_response)
        log.info(response)
    except Exception as e:
        msg = 'access not given to write, hence failing'
        log.info(msg)
    else:
        raise TestExecError("access not given, but still bucket got deleted")
def upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info):
    log.info("s3 object name: %s" % s3_object_name)
    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
    log.info("s3 object path: %s" % s3_object_path)
    s3_object_size = utils.get_file_size(
        config.objects_size_range["min"], config.objects_size_range["max"]
    )
    data_info = manage_data.io_generator(s3_object_path, s3_object_size)
    if data_info is False:
        raise TestExecError("data creation failed")
    log.info("uploading s3 object: %s" % s3_object_path)
    upload_info = dict({"access_key": user_info["access_key"]}, **data_info)
    object_uploaded_status = s3lib.resource_op(
        {
            "obj": bucket,
            "resource": "upload_file",
            "args": [s3_object_path, s3_object_name],
            "extra_info": upload_info,
        }
    )
    if object_uploaded_status is False:
        raise TestExecError("Resource execution failed: object upload failed")
    if object_uploaded_status is None:
        log.info("object uploaded")
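# Minimal usage sketch for the helper above (illustrative only: it assumes a boto3
# Bucket resource `bucket`, a `config` exposing objects_size_range, and a
# `user_info` dict carrying an 'access_key', all prepared elsewhere in this suite):
#
#     s3_object_name = utils.gen_s3_object_name(bucket.name, 0)
#     upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)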
def test_exec(config):
    test_info = AddTestInfo('test swift user key gen')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    try:
        test_info.started_info()
        # preparing data
        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'
        tenant_user_info = umgmt.create_tenant_user(
            tenant_name=tenant, user_id=user_names[0],
            displayname=user_names[0], cluster_name=config.cluster_name)
        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0],
                                         cluster_name=config.cluster_name)
        auth = Auth(user_info)
        rgw = auth.do_auth()
        for cc in range(config.container_count):
            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=cc)
            container = swiftlib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc in range(config.objects_count):
                swift_object_name = utils.gen_s3_object_name(
                    '%s.container.%s' % (user_names[0], cc), oc)
                log.info('object name: %s' % swift_object_name)
                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
                log.info('object path: %s' % object_path)
                object_size = utils.get_file_size(
                    config.objects_size_range['min'],
                    config.objects_size_range['max'])
                data_info = manage_data.io_generator(object_path, object_size)
                # upload object
                if data_info is False:
                    raise TestExecError("data creation failed")
                log.info('uploading object: %s' % object_path)
                with open(object_path, 'r') as fp:
                    rgw.put_object(container_name, swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')
                # download object
                swift_object_download_fname = swift_object_name + ".download"
                log.info('download object name: %s' % swift_object_download_fname)
                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)
                log.info('download object path: %s' % swift_object_download_path)
                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)
                with open(swift_object_download_path, 'w') as fp:
                    fp.write(swift_object_downloaded[1])
                # modify and re-upload
                log.info('appending new message to test_data')
                message_to_append = 'adding new msg after download'
                fp = open(swift_object_download_path, 'a+')
                fp.write(message_to_append)
                fp.close()
                with open(swift_object_download_path, 'r') as fp:
                    rgw.put_object(container_name, swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')
                # delete object
                log.info('deleting swift object')
                rgw.delete_object(container_name, swift_object_name)
            # delete container
            log.info('deleting swift container')
            rgw.delete_container(container_name)
        test_info.success_status('test passed')
        sys.exit(0)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo('create m buckets with n objects')
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth_using_client()
            rgw = auth.do_auth()
            bucket_list = []
            buckets = rgw_conn.list_buckets()
            log.info('buckets are %s' % buckets)
            for each_bucket in buckets['Buckets']:
                bucket_list.append(each_bucket['Name'])
            for bucket_name in bucket_list:
                # create 'bucket' resource object
                bucket = rgw.Bucket(bucket_name)
                log.info('In bucket: %s' % bucket_name)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        s3_object_size = utils.get_file_size(
                            config.objects_size_range['min'],
                            config.objects_size_range['max'])
                        data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                        if data_info is False:
                            raise TestExecError("data creation failed")
                        log.info('uploading s3 object: %s' % s3_object_path)
                        upload_info = dict({'access_key': each_user['access_key']}, **data_info)
                        # object_uploaded_status = bucket.upload_file(s3_object_path, s3_object_name)
                        object_uploaded_status = s3lib.resource_op({
                            'obj': bucket,
                            'resource': 'upload_file',
                            'args': [s3_object_path, s3_object_name],
                            'extra_info': upload_info})
                        if object_uploaded_status is False:
                            raise TestExecError("Resource execution failed: object upload failed")
                        if object_uploaded_status is None:
                            log.info('object uploaded')
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(TEST_DATA_PATH,
                                                                   s3_object_download_name)
                            log.info('s3_object_download_path: %s' % s3_object_download_path)
                            log.info('downloading to filename: %s' % s3_object_download_name)
                            # object_downloaded_status = bucket.download_file(s3_object_path, s3_object_name)
                            object_downloaded_status = s3lib.resource_op({
                                'obj': bucket,
                                'resource': 'download_file',
                                'args': [s3_object_name, s3_object_download_path]})
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                if config.test_ops['delete_bucket_object'] is True:
                    log.info('listing all objects in bucket: %s' % bucket.name)
                    # objects = s3_ops.resource_op(bucket, 'objects', None)
                    objects = s3lib.resource_op({'obj': bucket, 'resource': 'objects', 'args': None})
                    log.info('objects :%s' % objects)
                    # all_objects = s3_ops.resource_op(objects, 'all')
                    all_objects = s3lib.resource_op({'obj': objects, 'resource': 'all', 'args': None})
                    log.info('all objects: %s' % all_objects)
                    for obj in all_objects:
                        log.info('object_name: %s' % obj.key)
                    log.info('deleting all objects in bucket')
                    # objects_deleted = s3_ops.resource_op(objects, 'delete')
                    objects_deleted = s3lib.resource_op({'obj': objects, 'resource': 'delete', 'args': None})
                    log.info('objects_deleted: %s' % objects_deleted)
                    if objects_deleted is False:
                        raise TestExecError('Resource execution failed: Object deletion failed')
                    if objects_deleted is not None:
                        response = HttpResponseParser(objects_deleted[0])
                        if response.status_code == 200:
                            log.info('objects deleted ')
                        else:
                            raise TestExecError("objects deletion failed")
                    else:
                        raise TestExecError("objects deletion failed")
                    # wait for object delete info to sync
                    time.sleep(60)
                    log.info('deleting bucket: %s' % bucket.name)
                    # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                    bucket_deleted_status = s3lib.resource_op({'obj': bucket, 'resource': 'delete', 'args': None})
                    log.info('bucket_deleted_status: %s' % bucket_deleted_status)
                    if bucket_deleted_status is not None:
                        response = HttpResponseParser(bucket_deleted_status)
                        if response.status_code == 204:
                            log.info('bucket deleted ')
                        else:
                            raise TestExecError("bucket deletion failed")
                    else:
                        raise TestExecError("bucket deletion failed")
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config): test_info = AddTestInfo("test swift user key gen") io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) try: test_info.started_info() # preparing data user_names = ["tuffy", "scooby", "max"] tenant1 = "tenant" cmd = ( 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s --cluster %s' % (user_names[0], user_names[0], tenant1, config.cluster_name) ) out = utils.exec_shell_cmd(cmd) if out is False: raise TestExecError("RGW User creation error") log.info("output :%s" % out) v1_as_json = json.loads(out) log.info("creted user_id: %s" % v1_as_json["user_id"]) cmd2 = ( "radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s" % (tenant1, user_names[0], user_names[0], tenant1, config.cluster_name) ) out2 = utils.exec_shell_cmd(cmd2) if out2 is False: raise TestExecError("sub-user creation error") v2_as_json = json.loads(out2) log.info("created subuser: %s" % v2_as_json["subusers"][0]["id"]) cmd3 = ( "radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret " "--cluster %s" % (user_names[0], user_names[0], tenant1, tenant1, config.cluster_name) ) out3 = utils.exec_shell_cmd(cmd3) if out3 is False: raise TestExecError("secret_key gen error") v3_as_json = json.loads(out3) log.info( "created subuser: %s\nsecret_key generated: %s" % ( v3_as_json["swift_keys"][0]["user"], v3_as_json["swift_keys"][0]["secret_key"], ) ) user_info = { "user_id": v3_as_json["swift_keys"][0]["user"], "key": v3_as_json["swift_keys"][0]["secret_key"], } auth = Auth(user_info) rgw = auth.do_auth() for cc in range(config.container_count): container_name = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=cc ) container = swiftlib.resource_op( {"obj": rgw, "resource": "put_container", "args": [container_name]} ) if container is False: raise TestExecError( "Resource execution failed: container creation faield" ) for oc in range(config.objects_count): swift_object_name = utils.gen_s3_object_name( "%s.container.%s" % (user_names[0], cc), oc ) log.info("object name: %s" % swift_object_name) object_path = os.path.join(TEST_DATA_PATH, swift_object_name) log.info("object path: %s" % object_path) object_size = utils.get_file_size( config.objects_size_range["min"], config.objects_size_range["max"] ) data_info = manage_data.io_generator(object_path, object_size) if data_info is False: TestExecError("data creation failed") log.info("uploading object: %s" % object_path) with open(object_path, "r") as fp: rgw.put_object( container_name, swift_object_name, contents=fp.read(), content_type="text/plain", ) test_info.success_status("test passed") sys.exit(0) except Exception as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1) except TestExecError as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1)
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()
    # create pool
    pool_name = '.rgw.buckets.special'
    pg_num = '8'
    pgp_num = '8'
    pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (
        pool_name, pg_num, pgp_num)
    pool_create_exec = utils.exec_shell_cmd(pool_create)
    if pool_create_exec is False:
        raise TestExecError("Pool creation failed")
    # create realm
    realm_name = 'buz-tickets'
    log.info('creating realm name')
    realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s' % realm_name
    realm_create_exec = utils.exec_shell_cmd(realm_create)
    if realm_create_exec is False:
        raise TestExecError("cmd execution failed")
    # sample output of create realm
    """
    {
        "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
        "name": "buz-tickets",
        "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
        "epoch": 1
    }
    """
    log.info('modify zonegroup ')
    modify = ('sudo radosgw-admin zonegroup modify --rgw-zonegroup=default '
              '--rgw-realm=%s --master' % realm_name)
    modify_exec = utils.exec_shell_cmd(modify)
    if modify_exec is False:
        raise TestExecError("cmd execution failed")
    # get the zonegroup
    zonegroup_file = 'zonegroup.json'
    get_zonegroup = ('sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s'
                     % zonegroup_file)
    get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
    if get_zonegroup_exec is False:
        raise TestExecError("cmd execution failed")
    add_to_placement_targets = {"name": "special-placement", "tags": []}
    with open(zonegroup_file, 'r') as fp:
        zonegroup_txt = fp.read()
    log.info('got zonegroup info: \n%s' % zonegroup_txt)
    zonegroup = json.loads(zonegroup_txt)
    log.info('adding placement targets')
    zonegroup['placement_targets'].append(add_to_placement_targets)
    with open(zonegroup_file, 'w') as fp:
        json.dump(zonegroup, fp)
    zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file
    zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
    if zonegroup_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone group update completed')
    log.info('getting zone file')
    # get zone
    log.info('getting zone info')
    zone_file = 'zone.json'
    get_zone = 'sudo radosgw-admin zone --rgw-zone=default get > zone.json'
    get_zone_exec = utils.exec_shell_cmd(get_zone)
    if get_zone_exec is False:
        raise TestExecError("cmd execution failed")
    with open(zone_file, 'r') as fp:
        zone_info = fp.read()
    log.info('zone_info :\n%s' % zone_info)
    zone_info_cleaned = json.loads(zone_info)
    special_placement_info = {
        "key": "special-placement",
        "val": {
            "index_pool": ".rgw.buckets.index",
            "data_pool": ".rgw.buckets.special",
            "data_extra_pool": ".rgw.buckets.extra"
        }
    }
    log.info('adding special placement info')
    zone_info_cleaned['placement_pools'].append(special_placement_info)
    with open(zone_file, 'w+') as fp:
        json.dump(zone_info_cleaned, fp)
    zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file
    zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
    if zone_file_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone info updated ')
    zone_group_update_set = 'radosgw-admin period update --commit'
    zone_group_update_set_exec = utils.exec_shell_cmd(zone_group_update_set)
    log.info(zone_group_update_set_exec)
    restarted = rgw_service.restart()
    if restarted is False:
        raise TestExecError("service restart failed")
    if config.rgw_client == 'rgw':
        log.info('client type is rgw')
        rgw_user_info = s3_swift_lib.create_users(1)
        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()
        # create bucket
        bucket_name = utils.gen_bucket_name_from_userid(rgw_user_info['user_id'], 0)
        bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
        # create object
        s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                rgw_user_info)
    if config.rgw_client == 'swift':
        log.info('client type is swift')
        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'
        umgmt = UserMgmt()
        umgmt.create_tenant_user(tenant_name=tenant, user_id=user_names[0],
                                 displayname=user_names[0])
        user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
        auth = Auth(user_info)
        rgw = auth.do_auth()
        container_name = utils.gen_bucket_name_from_userid(user_info['user_id'],
                                                           rand_no=0)
        container = s3_swift_lib.resource_op({
            'obj': rgw,
            'resource': 'put_container',
            'args': [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation failed")
        swift_object_name = utils.gen_s3_object_name(
            '%s.container.%s' % (user_names[0], 0), 0)
        log.info('object name: %s' % swift_object_name)
        object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
        log.info('object path: %s' % object_path)
        object_size = utils.get_file_size(config.objects_size_range['min'],
                                          config.objects_size_range['max'])
        data_info = manage_data.io_generator(object_path, object_size)
        # upload object
        if data_info is False:
            raise TestExecError("data creation failed")
        log.info('uploading object: %s' % object_path)
        with open(object_path, 'r') as fp:
            rgw.put_object(container_name, swift_object_name,
                           contents=fp.read(),
                           content_type='text/plain')
def test_exec(rgw_user_info_file, config): test_info = AddTestInfo("Test Basic IO on S3") io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) write_user_info = AddUserInfo() try: test_info.started_info() with open(rgw_user_info_yaml, "r") as f: rgw_user_info = yaml.safe_load(f) mount_point = rgw_user_info["nfs_mnt_point"] nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file) mounted = nfs_ganesha.initialize(write_io_info=False) if mounted is False: raise TestExecError("mount failed") if (nfs_ganesha.rgw_user_info["nfs_version"] == 4 and nfs_ganesha.rgw_user_info["Pseudo"] is not None): log.info("nfs version: 4") log.info("adding Pseudo path to writable mount point") mount_point = os.path.join(mount_point, nfs_ganesha.rgw_user_info["Pseudo"]) log.info("writable mount point with Pseudo: %s" % mount_point) log.info("authenticating rgw user") # authenticate auth = Auth(rgw_user_info) rgw_conn = auth.do_auth() # add user_info io_info yaml file user_info_add = basic_io_structure.user(**rgw_user_info) write_user_info.add_user_info(user_info_add) if config.io_op_config.get("create", None) is True: # create buckets for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( rgw_user_info["user_id"], rand_no=bc) bucket = s3_reusables.create_bucket(bucket_name_to_create, rgw_conn, rgw_user_info) # uploading data log.info("s3 objects to create: %s" % config.objects_count) for oc in range(config.objects_count): s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, oc) config.obj_size = utils.get_file_size( config.objects_size_range.get("min"), config.objects_size_range.get("max"), ) s3_reusables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, rgw_user_info) log.info("verification Starts on NFS mount after %s seconds" % SLEEP_TIME) time.sleep(SLEEP_TIME) read_io_info_on_nfs = ReadIOInfoOnNFS(mount_point) read_io_info_on_nfs.yaml_fname = "io_info.yaml" read_io_info_on_nfs.initialize_verify_io() read_io_info_on_nfs.verify_if_basedir_created() read_io_info_on_nfs.verify_if_files_created() log.info("verification complete, data intact") created_buckets = read_io_info_on_nfs.base_dirs created_objects = read_io_info_on_nfs.files if config.io_op_config.get("delete", None) is True: log.info("delete operation starts") for bucket_name in created_buckets: bucket = s3lib.resource_op({ "obj": rgw_conn, "resource": "Bucket", "args": [os.path.basename(bucket_name)], }) # buckets are base dirs in NFS objects = s3lib.resource_op({ "obj": bucket, "resource": "objects", "args": None }) log.info("deleting all objects in bucket") objects_deleted = s3lib.resource_op({ "obj": objects, "resource": "delete", "args": None }) log.info("objects_deleted: %s" % objects_deleted) if objects_deleted is False: raise TestExecError( "Resource execution failed: Object deletion failed" ) if objects_deleted is not None: response = HttpResponseParser(objects_deleted[0]) if response.status_code == 200: log.info("objects deleted ") else: raise TestExecError("objects deletion failed") else: raise TestExecError("objects deletion failed") log.info("deleting bucket: %s" % bucket.name) bucket_deleted_status = s3lib.resource_op({ "obj": bucket, "resource": "delete", "args": None }) log.info("bucket_deleted_status: %s" % bucket_deleted_status) if bucket_deleted_status is not None: response = HttpResponseParser(bucket_deleted_status) if response.status_code == 204: log.info("bucket 
deleted ") else: raise TestExecError("bucket deletion failed") else: raise TestExecError("bucket deletion failed") log.info( "verification on NFS will start after %s seconds for delete operation" % SLEEP_TIME) time.sleep(200) for basedir in created_buckets: exists = os.path.exists(basedir) log.info("exists status: %s" % exists) if exists is True: raise TestExecError( "Basedir or Basedir: %s not deleted on NFS" % basedir) log.info("basedirs deleted") for each_file in created_objects: log.info("verifying existence for: %s" % each_file["file"]) exists = os.path.exists(each_file["file"]) if exists: raise TestExecError("files not created") log.info("file deleted") log.info( "verification of files complete, files exists and data intact" ) if config.io_op_config.get("move", None) is True: log.info("move operation starts") for each_file in created_objects: # in s3 move operation is achieved by copying the same object with the new name and # deleting the old object log.info("move operation for :%s" % each_file["file"]) new_obj_name = os.path.basename( each_file["file"]) + ".moved" log.info("new file name: %s" % new_obj_name) new_object = s3lib.resource_op({ "obj": rgw_conn, "resource": "Object", "args": [each_file["bucket"], new_obj_name], }) new_object.copy_from( CopySource="%s/%s" % (each_file["bucket"], os.path.basename( each_file["file"]))) # old object name old_object = s3lib.resource_op({ "obj": rgw_conn, "resource": "Object", "args": [ each_file["bucket"], os.path.basename(each_file["file"]), ], }) old_object.delete() each_file["file"] = os.path.abspath( os.path.join(mount_point, each_file["bucket"], new_obj_name)) log.info( "verification on NFS for move operation will start after %s seconds" % SLEEP_TIME) time.sleep(SLEEP_TIME) read_io_info_on_nfs.verify_if_files_created() log.info("move completed, data intact") test_info.success_status("test passed") sys.exit(0) except Exception as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1) except TestExecError as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1)
def test_exec(config): test_info = AddTestInfo("test swift user key gen") io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) umgmt = UserMgmt() try: test_info.started_info() # preparing data user_names = ["tuffy", "scooby", "max"] tenant = "tenant" tenant_user_info = umgmt.create_tenant_user( tenant_name=tenant, user_id=user_names[0], displayname=user_names[0], cluster_name=config.cluster_name, ) user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0], cluster_name=config.cluster_name) auth = Auth(user_info) rgw = auth.do_auth() for cc in range(config.container_count): container_name = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=cc) container = swiftlib.resource_op({ "obj": rgw, "resource": "put_container", "args": [container_name] }) if container is False: raise TestExecError( "Resource execution failed: container creation faield") for oc in range(config.objects_count): swift_object_name = utils.gen_s3_object_name( "%s.container.%s" % (user_names[0], cc), oc) log.info("object name: %s" % swift_object_name) object_path = os.path.join(TEST_DATA_PATH, swift_object_name) log.info("object path: %s" % object_path) object_size = utils.get_file_size( config.objects_size_range["min"], config.objects_size_range["max"]) data_info = manage_data.io_generator(object_path, object_size) # upload object if data_info is False: TestExecError("data creation failed") log.info("uploading object: %s" % object_path) with open(object_path, "r") as fp: rgw.put_object( container_name, swift_object_name, contents=fp.read(), content_type="text/plain", ) # download object swift_object_download_fname = swift_object_name + ".download" log.info("download object name: %s" % swift_object_download_fname) swift_object_download_path = os.path.join( TEST_DATA_PATH, swift_object_download_fname) log.info("download object path: %s" % swift_object_download_path) swift_object_downloaded = rgw.get_object( container_name, swift_object_name) with open(swift_object_download_path, "w") as fp: fp.write(swift_object_downloaded[1]) # modify and re-upload log.info("appending new message to test_data") message_to_append = "adding new msg after download" fp = open(swift_object_download_path, "a+") fp.write(message_to_append) fp.close() with open(swift_object_download_path, "r") as fp: rgw.put_object( container_name, swift_object_name, contents=fp.read(), content_type="text/plain", ) # delete object log.info("deleting swift object") rgw.delete_object(container_name, swift_object_name) # delete container log.info("deleting swift container") rgw.delete_container(container_name) test_info.success_status("test passed") sys.exit(0) except Exception as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1) except TestExecError as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1)
def test_exec(config): test_info = AddTestInfo("storage_policy for %s" % config.rgw_client) io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) rgw_service = RGWService() try: # create pool pool_name = ".rgw.buckets.special" pg_num = "8" pgp_num = "8" pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % ( pool_name, pg_num, pgp_num, ) pool_create_exec = utils.exec_shell_cmd(pool_create) if pool_create_exec is False: raise TestExecError("Pool creation failed") # create realm realm_name = "buz-tickets" log.info("creating realm name") realm_create = ( "sudo radosgw-admin realm create --rgw-realm=%s --default" % realm_name ) realm_create_exec = utils.exec_shell_cmd(realm_create) if realm_create_exec is False: raise TestExecError("cmd execution failed") # sample output of create realm """ { "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62", "name": "buz-tickets", "current_period": "1950b710-3e63-4c41-a19e-46a715000980", "epoch": 1 } """ log.info("modify zonegroup ") modify = ( "sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master --default" % realm_name ) modify_exec = utils.exec_shell_cmd(modify) if modify_exec is False: raise TestExecError("cmd execution failed") # get the zonegroup zonegroup_file = "zonegroup.json" get_zonegroup = ( "sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s" % zonegroup_file ) get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup) if get_zonegroup_exec is False: raise TestExecError("cmd execution failed") add_to_placement_targets = {"name": "special-placement", "tags": []} fp = open(zonegroup_file, "r") zonegroup_txt = fp.read() fp.close() log.info("got zonegroup info: \n%s" % zonegroup_txt) zonegroup = json.loads(zonegroup_txt) log.info("adding placement targets") zonegroup["placement_targets"].append(add_to_placement_targets) with open(zonegroup_file, "w") as fp: json.dump(zonegroup, fp) zonegroup_set = "sudo radosgw-admin zonegroup set < %s" % zonegroup_file zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set) if zonegroup_set_exec is False: raise TestExecError("cmd execution failed") log.info("zone group update completed") log.info("getting zone file") # get zone log.info("getting zone info") zone_file = "zone.json" get_zone = "sudo radosgw-admin zone --rgw-zone=default get > zone.json" get_zone_exec = utils.exec_shell_cmd(get_zone) if get_zone_exec is False: raise TestExecError("cmd execution failed") fp = open(zone_file, "r") zone_info = fp.read() fp.close() log.info("zone_info :\n%s" % zone_info) zone_info_cleaned = json.loads(zone_info) special_placement_info = { "key": "special-placement", "val": { "index_pool": ".rgw.buckets.index", "data_pool": ".rgw.buckets.special", "data_extra_pool": ".rgw.buckets.extra", }, } log.info("adding special placement info") zone_info_cleaned["placement_pools"].append(special_placement_info) print(zone_info_cleaned) with open(zone_file, "w+") as fp: json.dump(zone_info_cleaned, fp) zone_file_set = "sudo radosgw-admin zone set < %s" % zone_file zone_file_set_exec = utils.exec_shell_cmd(zone_file_set) if zone_file_set_exec is False: raise TestExecError("cmd execution failed") log.info("zone info updated ") restarted = rgw_service.restart() if restarted is False: raise TestExecError("service restart failed") if config.rgw_client == "rgw": log.info("client type is rgw") rgw_user_info = s3_swift_lib.create_users(1) auth = Auth(rgw_user_info) rgw_conn = auth.do_auth() # create bucket 
bucket_name = utils.gen_bucket_name_from_userid(rgw_user_info["user_id"], 0) bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info) # create object s3_object_name = utils.gen_s3_object_name(bucket_name, 0) resuables.upload_object( s3_object_name, bucket, TEST_DATA_PATH, config, rgw_user_info ) if config.rgw_client == "swift": log.info("client type is swift") user_names = ["tuffy", "scooby", "max"] tenant = "tenant" umgmt = UserMgmt() umgmt.create_tenant_user( tenant_name=tenant, user_id=user_names[0], displayname=user_names[0] ) user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0]) auth = Auth(user_info) rgw = auth.do_auth() container_name = utils.gen_bucket_name_from_userid( user_info["user_id"], rand_no=0 ) container = s3_swift_lib.resource_op( {"obj": rgw, "resource": "put_container", "args": [container_name]} ) if container is False: raise TestExecError( "Resource execution failed: container creation faield" ) swift_object_name = utils.gen_s3_object_name( "%s.container.%s" % (user_names[0], 0), 0 ) log.info("object name: %s" % swift_object_name) object_path = os.path.join(TEST_DATA_PATH, swift_object_name) log.info("object path: %s" % object_path) object_size = utils.get_file_size( config.objects_size_range["min"], config.objects_size_range["max"] ) data_info = manage_data.io_generator(object_path, object_size) # upload object if data_info is False: TestExecError("data creation failed") log.info("uploading object: %s" % object_path) with open(object_path, "r") as fp: rgw.put_object( container_name, swift_object_name, contents=fp.read(), content_type="text/plain", ) test_info.success_status("test passed") sys.exit(0) except Exception as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1) except TestExecError as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1)
def test_exec(rgw_user_info_file, config):
    test_info = AddTestInfo('NFS Basic Ops')
    test_info.started_info()
    log.info('io_config:\n%s' % config['config'])
    log.info('rgw_user_info_file: %s' % rgw_user_info_file)
    log.info('io_op_config: %s' % config['io_op_config'])
    io_config = config['config']
    io_op_config = config['io_op_config']
    log.info('initiating nfs ganesha')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file)
        mounted = nfs_ganesha.initialize()
        if mounted is False:
            raise TestExecError("mount failed")
        log.info('authenticating rgw user')
        mnt_point = nfs_ganesha.rgw_user_info['nfs_mnt_point']
        if nfs_ganesha.rgw_user_info['nfs_version'] == 4 and \
                nfs_ganesha.rgw_user_info['Pseudo'] is not None:
            log.info('nfs version: 4')
            log.info('adding Pseudo path to writable mount point')
            mnt_point = os.path.join(mnt_point, nfs_ganesha.rgw_user_info['Pseudo'])
            log.info('writable mount point with Pseudo: %s' % mnt_point)
        if io_op_config.get('create', None) is True:
            do_io = DoIO(nfs_ganesha.rgw_user_info, mnt_point)
            # base dir creation
            for bc in range(io_config['basedir_count']):
                basedir_name_to_create = utils.gen_bucket_name_from_userid(
                    nfs_ganesha.rgw_user_info['user_id'], rand_no=bc)
                log.info('creating basedir with name: %s' % basedir_name_to_create)
                write = do_io.write('basedir', basedir_name_to_create)
                if write is False:
                    raise TestExecError("write failed on mount point")
                if io_config['subdir_count'] != 0:
                    for sd in range(io_config['subdir_count']):
                        subdir_name_to_create = utils.gen_bucket_name_from_userid(
                            basedir_name_to_create + ".subdir", rand_no=sd)
                        log.info('creating subdir with name: %s'
                                 % subdir_name_to_create)
                        write = do_io.write(
                            'subdir',
                            os.path.join(basedir_name_to_create,
                                         subdir_name_to_create))
                        if write is False:
                            raise TestExecError("write failed on mount point")
                if io_config['file_count'] != 0:
                    for fc in range(io_config['file_count']):
                        file_name_to_create = utils.gen_bucket_name_from_userid(
                            basedir_name_to_create + ".file", rand_no=fc)
                        log.info('creating file with name: %s' % file_name_to_create)
                        file_size = utils.get_file_size(
                            io_config['objects_size_range']['min'],
                            io_config['objects_size_range']['max'])
                        write = do_io.write(
                            'file',
                            os.path.join(basedir_name_to_create, file_name_to_create),
                            file_size)
                        if write is False:
                            raise TestExecError("write failed on mount point")
            log.info('verification of IO will start after %s seconds' % SLEEP_TIME)
            time.sleep(SLEEP_TIME)
            log.info('starting IO verification on S3')
            read_io_info_on_s3 = ReadIOInfoOnS3()
            read_io_info_on_s3.yaml_fname = 'io_info.yaml'
            read_io_info_on_s3.initialize_verify_io()
            bucket_verify = read_io_info_on_s3.verify_if_bucket_created()
            if bucket_verify is False:
                raise TestExecError("Bucket verification Failed")
            log.info('Bucket verified, data intact')
            read_io_info_on_s3.verify_if_objects_created()
            log.info('objects verified, data intact')
            log.info('verification completed, data intact')
        if io_op_config.get('delete', None) is True:
            log.info('performing delete operation')
            # if you delete basedirs, objects and files under them will also be deleted
            basedirs_list = read_io_info_on_s3.buckets
            list([
                shutil.rmtree(os.path.abspath(os.path.join(mnt_point, x)))
                for x in basedirs_list
            ])
            for basedir in basedirs_list:
                if os.path.exists(os.path.abspath(os.path.join(mnt_point, basedir))):
                    raise TestExecError("basedir: %s not deleted" % basedir)
            log.info('basedirs and subdirs deleted')
        if io_op_config.get('move', None) is True:
            for each_file in read_io_info_on_s3.objects:
                if each_file['type'] == 'file':
                    log.info('performing move operation on %s' % each_file['name'])
                    current_path = os.path.abspath(
                        os.path.join(mnt_point, each_file['bucket'],
                                     each_file['name']))
                    new_path = os.path.abspath(
                        os.path.join(mnt_point, each_file['bucket'],
                                     each_file['name'] + ".moved"))
                    moved = utils.exec_shell_cmd('sudo mv %s %s'
                                                 % (current_path, new_path))
                    if moved is False:
                        raise TestExecError("move failed for :%s" % current_path)
                    each_file['name'] = os.path.basename(new_path)
            log.info('Verification will start after %s seconds' % SLEEP_TIME)
            time.sleep(SLEEP_TIME)
            log.info('starting verification for moved files')
            read_io_info_on_s3.verify_if_objects_created()
            log.info('objects verified after move operation, data intact')
        test_info.success_status("test success")
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        return 1
def test_exec(config):
    test_info = AddTestInfo('test swift user key gen')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # preparing data
        user_names = ['tuffy', 'scooby', 'max']
        tenant1 = 'tenant'
        cmd = 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s --cluster %s' \
              % (user_names[0], user_names[0], tenant1, config.cluster_name)
        out = utils.exec_shell_cmd(cmd)
        if out is False:
            raise TestExecError("RGW User creation error")
        log.info('output :%s' % out)
        v1_as_json = json.loads(out)
        log.info('created user_id: %s' % v1_as_json['user_id'])
        cmd2 = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s' \
               % (tenant1, user_names[0], user_names[0], tenant1, config.cluster_name)
        out2 = utils.exec_shell_cmd(cmd2)
        if out2 is False:
            raise TestExecError("sub-user creation error")
        v2_as_json = json.loads(out2)
        log.info('created subuser: %s' % v2_as_json['subusers'][0]['id'])
        cmd3 = 'radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret ' \
               '--cluster %s' % (user_names[0], user_names[0], tenant1, tenant1, config.cluster_name)
        out3 = utils.exec_shell_cmd(cmd3)
        if out3 is False:
            raise TestExecError("secret_key gen error")
        v3_as_json = json.loads(out3)
        log.info('created subuser: %s\nsecret_key generated: %s'
                 % (v3_as_json['swift_keys'][0]['user'],
                    v3_as_json['swift_keys'][0]['secret_key']))
        user_info = {
            'user_id': v3_as_json['swift_keys'][0]['user'],
            'key': v3_as_json['swift_keys'][0]['secret_key']
        }
        auth = Auth(user_info)
        rgw = auth.do_auth()
        for cc in range(config.container_count):
            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=cc)
            container = swiftlib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc in range(config.objects_count):
                swift_object_name = utils.gen_s3_object_name(
                    '%s.container.%s' % (user_names[0], cc), oc)
                log.info('object name: %s' % swift_object_name)
                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
                log.info('object path: %s' % object_path)
                object_size = utils.get_file_size(
                    config.objects_size_range['min'],
                    config.objects_size_range['max'])
                data_info = manage_data.io_generator(object_path, object_size)
                if data_info is False:
                    raise TestExecError("data creation failed")
                log.info('uploading object: %s' % object_path)
                with open(object_path, 'r') as fp:
                    rgw.put_object(container_name, swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo('test versioning with objects')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count, config.cluster_name)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            s3_object_names = []
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = s3lib.resource_op({'obj': rgw_conn,
                                            'resource': 'Bucket',
                                            'args': [bucket_name_to_create]})
                # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
                created = s3lib.resource_op({'obj': bucket,
                                             'resource': 'create',
                                             'args': None,
                                             'extra_info': {'access_key': each_user['access_key']}})
                if created is False:
                    raise TestExecError("Resource execution failed: bucket creation failed")
                if created is not None:
                    response = HttpResponseParser(created)
                    if response.status_code == 200:
                        log.info('bucket created')
                    else:
                        raise TestExecError("bucket creation failed")
                else:
                    raise TestExecError("bucket creation failed")
                # getting bucket version object
                if config.test_ops['enable_version'] is True:
                    log.info('bucket versioning test on bucket: %s' % bucket.name)
                    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                    bucket_versioning = s3lib.resource_op({'obj': rgw_conn,
                                                           'resource': 'BucketVersioning',
                                                           'args': [bucket.name]})
                    # checking the versioning status
                    # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                    version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                        'resource': 'status',
                                                        'args': None})
                    if version_status is None:
                        log.info('bucket versioning still not enabled')
                    # enabling bucket versioning
                    # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                    version_enable_status = s3lib.resource_op({'obj': bucket_versioning,
                                                               'resource': 'enable',
                                                               'args': None})
                    response = HttpResponseParser(version_enable_status)
                    if response.status_code == 200:
                        log.info('version enabled')
                    else:
                        raise TestExecError("version enable failed")
                    if config.objects_count > 0:
                        log.info('s3 objects to create: %s' % config.objects_count)
                        for oc in range(config.objects_count):
                            # versioned upload
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, str(oc))
                            s3_object_names.append(s3_object_name)
                            log.info('s3 object name: %s' % s3_object_name)
                            log.info('versioning count: %s' % config.version_count)
                            s3_object_size = utils.get_file_size(
                                config.objects_size_range['min'],
                                config.objects_size_range['max'])
                            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                            original_data_info = manage_data.io_generator(
                                s3_object_path, s3_object_size)
                            if original_data_info is False:
                                raise TestExecError("data creation failed")
                            for vc in range(config.version_count):
                                log.info('version count for %s is %s'
                                         % (s3_object_name, str(vc)))
                                log.info('modifying data: %s' % s3_object_name)
                                modified_data_info = manage_data.io_generator(
                                    s3_object_path, s3_object_size, data='append',
                                    **{'message': '\nhello object for version: %s\n' % str(vc)})
                                if modified_data_info is False:
                                    raise TestExecError("data modification failed")
                                log.info('uploading s3 object: %s' % s3_object_path)
                                upload_info = dict({'access_key': each_user['access_key']},
                                                   **modified_data_info)
                                object_uploaded_status = s3lib.resource_op({
                                    'obj': bucket,
                                    'resource': 'upload_file',
                                    'args': [modified_data_info['name'], s3_object_name],
                                    'extra_info': upload_info})
                                if object_uploaded_status is False:
                                    raise TestExecError(
                                        "Resource execution failed: object upload failed")
                                if object_uploaded_status is None:
                                    log.info('object uploaded')
                            log.info('all versions for the object: %s\n' % s3_object_name)
                            versions = bucket.object_versions.filter(Prefix=s3_object_name)
                            for version in versions:
                                log.info('key_name: %s --> version_id: %s'
                                         % (version.object_key, version.version_id))
                            if config.test_ops['copy_to_version'] is True:
                                # reverting object to one of the versions (randomly chosen)
                                version_id_to_copy = random.choice(
                                    [v.version_id for v in versions])
                                log.info('version_id_to_copy: %s' % version_id_to_copy)
                                s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                                log.info('current version_id: %s' % s3_obj.version_id)
                                copy_response = s3_obj.copy_from(
                                    CopySource={'Bucket': bucket.name,
                                                'Key': s3_object_name,
                                                'VersionId': version_id_to_copy})
                                log.info('copy_response: %s' % copy_response)
                                if copy_response is None:
                                    raise TestExecError("copy object from version id failed")
                                # current_version_id = copy_response['VersionID']
                                log.info('current_version_id: %s' % s3_obj.version_id)
                                # delete the version_id_to_copy object
                                s3_obj.delete(VersionId=version_id_to_copy)
                                log.info('all versions for the object after the copy '
                                         'operation: %s\n' % s3_object_name)
                                for version in versions:
                                    log.info('key_name: %s --> version_id: %s'
                                             % (version.object_key, version.version_id))
                                # log.info('downloading current s3object: %s' % s3_object_name)
                                # s3_obj.download_file(s3_object_name + ".download")
                            if config.test_ops['delete_object_versions'] is True:
                                log.info('deleting s3_obj keys and its versions')
                                s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                            'resource': 'Object',
                                                            'args': [bucket.name, s3_object_name]})
                                log.info('deleting versions for s3 obj: %s' % s3_object_name)
                                for version in versions:
                                    log.info('trying to delete obj version: %s'
                                             % version.version_id)
                                    del_obj_version = s3lib.resource_op({
                                        'obj': s3_obj,
                                        'resource': 'delete',
                                        'kwargs': dict(VersionId=version.version_id)})
                                    log.info('response:\n%s' % del_obj_version)
                                    if del_obj_version is not None:
                                        response = HttpResponseParser(del_obj_version)
                                        if response.status_code == 204:
                                            log.info('version deleted ')
                                        else:
                                            raise TestExecError("version deletion failed")
                                    else:
                                        raise TestExecError("version deletion failed")
                    if config.test_ops['suspend_version'] is True:
                        # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                        suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                    'resource': 'suspend',
                                                                    'args': None})
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info('versioning suspended')
                        else:
                            raise TestExecError("version suspend failed")
                if config.test_ops['upload_after_suspend'] is True:
                    log.info('trying to upload after suspending versioning on bucket')
                    for s3_object_name in s3_object_names:
                        # non-versioned upload
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_size = utils.get_file_size(
                            config.objects_size_range['min'],
                            config.objects_size_range['max'])
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        non_version_data_info = manage_data.io_generator(
                            s3_object_path, s3_object_size, op="append",
                            **{'message': '\nhello object for non version\n'})
                        if non_version_data_info is False:
                            raise TestExecError("data creation failed")
                        log.info('uploading s3 object: %s' % s3_object_path)
                        upload_info = dict({'access_key': each_user['access_key']},
                                           **non_version_data_info)
                        object_uploaded_status = s3lib.resource_op({
                            'obj': bucket,
                            'resource': 'upload_file',
                            'args': [non_version_data_info['name'], s3_object_name],
                            'extra_info': upload_info})
                        if object_uploaded_status is False:
                            raise TestExecError(
                                "Resource execution failed: object upload failed")
                        if object_uploaded_status is None:
                            log.info('object uploaded')
                        s3_object_download_path = os.path.join(
                            TEST_DATA_PATH, s3_object_name + ".download")
                        object_downloaded_status = s3lib.resource_op({
                            'obj': bucket,
                            'resource': 'download_file',
                            'args': [s3_object_name, s3_object_download_path]})
                        if object_downloaded_status is False:
                            raise TestExecError(
                                "Resource execution failed: object download failed")
                        if object_downloaded_status is None:
                            log.info('object downloaded')
                        # checking md5 of the downloaded file
                        s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                        log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                        log.info('s3_object_uploaded_md5: %s' % non_version_data_info['md5'])
        test_info.success_status('test passed')
        sys.exit(0)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
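# A hedged sketch of the knobs the versioning test above consumes; the key names
# are taken from the config.test_ops lookups in the function, the values are
# illustrative only (real values come from the suite's yaml config):
#
#     config.test_ops = {
#         'enable_version': True,
#         'copy_to_version': False,
#         'delete_object_versions': False,
#         'suspend_version': True,
#         'upload_after_suspend': True,
#     }
#     config.version_count = 3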
def test_exec(rgw_user_info_file, config): test_info = AddTestInfo("NFS Basic Ops") test_info.started_info() log.info("config:\n%s" % config["config"]) log.info("rgw_user_info_file: %s" % rgw_user_info_file) io_config = config["config"] io_op_config = io_config["io_op_config"] log.info("io_op_config: %s" % io_op_config) log.info("initiating nfs ganesha") io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file) mounted = nfs_ganesha.initialize() if not mounted: raise TestExecError("mount failed") log.info("authenticating rgw user") mount_point = nfs_ganesha.rgw_user_info["nfs_mnt_point"] if ( nfs_ganesha.rgw_user_info["nfs_version"] == 4 and nfs_ganesha.rgw_user_info["Pseudo"] is not None ): log.info("nfs version: 4") log.info("adding Pseudo path to writable mount point") mount_point = os.path.join(mount_point, nfs_ganesha.rgw_user_info["Pseudo"]) log.info("writable mount point with Pseudo: %s" % mount_point) if io_op_config.get("create", None): do_io = DoIO(nfs_ganesha.rgw_user_info, mount_point) # base dir creation for bc in range(io_config["basedir_count"]): basedir_name_to_create = utils.gen_bucket_name_from_userid( nfs_ganesha.rgw_user_info["user_id"], rand_no=bc ) log.info("creating basedir with name: %s" % basedir_name_to_create) do_io.write("basedir", basedir_name_to_create) if io_config["subdir_count"] != 0: for sd in range(io_config["subdir_count"]): subdir_name_to_create = utils.gen_bucket_name_from_userid( basedir_name_to_create + ".subdir", rand_no=sd ) log.info("creating subdir with name: %s" % subdir_name_to_create) do_io.write( "subdir", os.path.join(basedir_name_to_create, subdir_name_to_create), ) if io_config["file_count"] != 0: for fc in range(io_config["file_count"]): file_name_to_create = utils.gen_bucket_name_from_userid( basedir_name_to_create + ".file", rand_no=fc ) log.info("creating file with name: %s" % file_name_to_create) file_size = utils.get_file_size( io_config["objects_size_range"]["min"], io_config["objects_size_range"]["max"], ) do_io.write( "file", os.path.join(basedir_name_to_create, file_name_to_create), file_size, ) log.info("verification of IO will start after %s seconds" % SLEEP_TIME) time.sleep(SLEEP_TIME) log.info("starting IO verification on S3") read_io_info_on_s3 = ReadIOInfoOnS3() read_io_info_on_s3.yaml_fname = "io_info.yaml" read_io_info_on_s3.initialize_verify_io() bucket_verify = read_io_info_on_s3.verify_if_bucket_created() if not bucket_verify: raise TestExecError("Bucket verification Failed") log.info("Bucket verified, data intact") read_io_info_on_s3.verify_if_objects_created() log.info("objects verified, data intact") log.info("verification completed, data intact") if io_op_config.get("delete", None): log.info("performing delete operation") # if you delete basedirs, objects and files under them will also be deleted basedirs_list = read_io_info_on_s3.buckets list( [ shutil.rmtree(os.path.abspath(os.path.join(mount_point, x))) for x in basedirs_list ] ) for basedir in basedirs_list: if os.path.exists(os.path.abspath(os.path.join(mount_point, basedir))): raise TestExecError("basedir: %s not deleted" % basedir) log.info("basedirs and subdirs deleted") if io_op_config.get("move", None): for each_file in read_io_info_on_s3.objects: if each_file["type"] == "file": log.info("performing move operation on %s" % each_file["name"]) current_path = os.path.abspath( os.path.join( mount_point, 
each_file["bucket"], each_file["name"] ) ) new_path = os.path.abspath( os.path.join( mount_point, each_file["bucket"], each_file["name"] + ".moved", ) ) moved = utils.exec_shell_cmd( "sudo mv %s %s" % (current_path, new_path) ) if moved is False: raise TestExecError("move failed for :%s" % current_path) each_file["name"] = os.path.basename(new_path) log.info("Verification will start after %s seconds" % SLEEP_TIME) time.sleep(SLEEP_TIME) log.info("starting verification for moved files") read_io_info_on_s3.verify_if_objects_created() log.info("objects verified after move operation, data intact") # cleanup and unmount tasks for both nfs v3 and v4 if nfs_ganesha.rgw_user_info["cleanup"]: utils.exec_shell_cmd("sudo rm -rf %s%s" % (mount_point, "/*")) # Todo: There's a need to change the behaviour of exec_shell_cmd() function which returns # an empty string as an output on the successful execution of a command. if nfs_ganesha.rgw_user_info["do_unmount"]: if nfs_ganesha.do_un_mount() != "": raise NFSGaneshaMountError("Unmount failed") test_info.success_status("test success")