Example #1
    def create_subuser(self, tenant_name, user_id, cluster_name="ceph"):
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            tenant_info = TenantInfo()
            keys = utils.gen_access_key_secret_key(user_id)
            cmd = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s' \
                  % (tenant_name, user_id, user_id, tenant_name, cluster_name)
            log.info('cmd to execute:\n%s' % cmd)
            variable = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
            v = variable.stdout.read()
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details['user_id'] = v_as_json['subusers'][0]['id']
            user_details['key'] = v_as_json['swift_keys'][0]['secret_key']
            user_details['tenant'], _ = user_details['user_id'].split('$')
            user_info = basic_io_structure.user(**{'user_id': user_details['user_id'],
                                                   'secret_key': user_details['key'],
                                                   'access_key': ' '})
            write_user_info.add_user_info(dict(user_info, **tenant_info.tenant(user_details['tenant'])))
            log.info('secret_key: %s' % user_details['key'])
            log.info('user_id: %s' % user_details['user_id'])
            log.info('tenant: %s' % user_details['tenant'])
            return user_details

        except subprocess.CalledProcessError as e:
            error = e.output + str(e.returncode)
            log.error(error)
            return False
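
A note on the error handling above: subprocess.Popen never raises subprocess.CalledProcessError, so the except branch in these snippets is effectively unreachable. Below is a minimal sketch of the same call pattern using subprocess.run(..., check=True), which does raise on a non-zero exit status (the radosgw-admin invocation is only illustrative):

import json
import shlex
import subprocess

def run_radosgw_admin(cmd):
    """Run a radosgw-admin command and return its parsed JSON output."""
    result = subprocess.run(
        shlex.split(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,  # raises subprocess.CalledProcessError on a non-zero exit
    )
    return json.loads(result.stdout)

try:
    user_list = run_radosgw_admin("radosgw-admin user list --cluster ceph")
    print(user_list)
except subprocess.CalledProcessError as e:
    print("command failed with rc %s: %s" % (e.returncode, e.stderr.decode()))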
Example #2
    def create_admin_user(self, user_id, displayname, cluster_name='ceph'):
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            log.info('cluster name: %s' % cluster_name)
            cmd = 'radosgw-admin user create --uid=%s --display-name=%s --cluster %s' % (
                user_id, displayname, cluster_name)
            log.info('cmd to execute:\n%s' % cmd)
            variable = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
            v = variable.stdout.read()
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details['user_id'] = v_as_json['user_id']
            user_details['display_name'] = v_as_json['display_name']
            user_details['access_key'] = v_as_json['keys'][0]['access_key']
            user_details['secret_key'] = v_as_json['keys'][0]['secret_key']
            user_info = basic_io_structure.user(**{'user_id': user_details['user_id'],
                                                   'access_key': user_details['access_key'],
                                                   'secret_key': user_details['secret_key']})
            write_user_info.add_user_info(user_info)
            log.info('access_key: %s' % user_details['access_key'])
            log.info('secret_key: %s' % user_details['secret_key'])
            log.info('user_id: %s' % user_details['user_id'])
            return user_details

        except subprocess.CalledProcessError as e:
            error = e.output + str(e.returncode)
            log.error(error)
            # traceback.print_exc(e)
            return False
Example #3
    def create_admin_user(self, user_id, displayname, cluster_name="ceph"):
        """
        Function to create an S3-interface/admin user

        The S3-interface/admin user is created with the user_id, displayname, cluster_name.

        Parameters:
            user_id (char): id of the user
            displayname (char): Display Name of the user
            cluster_name (char): Name of the ceph cluster. defaults to 'ceph'

        Returns:
            user details, which contain the following
                - user_id
                - display_name
                - access_key
                - secret_key
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            log.info("cluster name: %s" % cluster_name)
            op = utils.exec_shell_cmd("radosgw-admin user list")
            if user_id in op:
                cmd = f"radosgw-admin user info --uid='{user_id}' --cluster {cluster_name}"
            else:
                cmd = f"radosgw-admin user create --uid='{user_id}' --display-name='{displayname}' --cluster {cluster_name}"
            log.info("cmd to execute:\n%s" % cmd)
            variable = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
                                        shell=True)
            v = variable.stdout.read()
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details["user_id"] = v_as_json["user_id"]
            user_details["display_name"] = v_as_json["display_name"]
            user_details["access_key"] = v_as_json["keys"][0]["access_key"]
            user_details["secret_key"] = v_as_json["keys"][0]["secret_key"]
            user_info = basic_io_structure.user(
                **{
                    "user_id": user_details["user_id"],
                    "access_key": user_details["access_key"],
                    "secret_key": user_details["secret_key"],
                })
            write_user_info.add_user_info(user_info)
            log.info("access_key: %s" % user_details["access_key"])
            log.info("secret_key: %s" % user_details["secret_key"])
            log.info("user_id: %s" % user_details["user_id"])
            return user_details

        except subprocess.CalledProcessError as e:
            error = e.output + str(e.returncode)
            log.error(error)
            # traceback.print_exc(e)
            return False
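
The access_key/secret_key pair returned by create_admin_user is what the S3 tests authenticate with. A minimal sketch of doing that directly with boto3; the endpoint URL and placeholder keys are assumptions, and the actual tests go through their own Auth helper:

import boto3

user_details = {"access_key": "<access-key>", "secret_key": "<secret-key>"}  # as returned above
s3 = boto3.resource(
    "s3",
    aws_access_key_id=user_details["access_key"],
    aws_secret_access_key=user_details["secret_key"],
    endpoint_url="http://rgw-host:8080",  # hypothetical RGW S3 endpoint
)
print([b.name for b in s3.buckets.all()])  # smoke test: list the user's buckets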
Example #4
    def create_admin_user(self, user_id, displayname, cluster_name='ceph'):
        """
            Function to create an S3-interface/admin user

            The S3-interface/admin user is created with the user_id, displayname, cluster_name.

            Parameters:
                user_id (char): id of the user
                displayname (char): Display Name of the user
                cluster_name (char): Name of the ceph cluster. defaults to 'ceph'

            Returns:
                user details, which contain the following
                    - user_id
                    - display_name
                    - access_key
                    - secret_key
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            log.info('cluster name: %s' % cluster_name)
            cmd = 'radosgw-admin user create --uid=%s --display-name=%s --cluster %s' % (
                user_id, displayname, cluster_name)
            log.info('cmd to execute:\n%s' % cmd)
            variable = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
                                        shell=True)
            v = variable.stdout.read()
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details['user_id'] = v_as_json['user_id']
            user_details['display_name'] = v_as_json['display_name']
            user_details['access_key'] = v_as_json['keys'][0]['access_key']
            user_details['secret_key'] = v_as_json['keys'][0]['secret_key']
            user_info = basic_io_structure.user(
                **{
                    'user_id': user_details['user_id'],
                    'access_key': user_details['access_key'],
                    'secret_key': user_details['secret_key']
                })
            write_user_info.add_user_info(user_info)
            log.info('access_key: %s' % user_details['access_key'])
            log.info('secret_key: %s' % user_details['secret_key'])
            log.info('user_id: %s' % user_details['user_id'])
            return user_details

        except subprocess.CalledProcessError as e:
            error = e.output + str(e.returncode)
            log.error(error)
            # traceback.print_exc(e)
            return False
Example #5
    def create_subuser(self, tenant_name, user_id, cluster_name="ceph"):
        """
        Function to create a subuser under a tenant.

        To create a swift-interface user under a tenant.
        Parameters:
             tenant_name (char): Name of the tenant
             user_id (char): id of the user
             cluster_name (char): Name of the ceph cluster. defaults to 'ceph'
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            tenant_info = TenantInfo()
            keys = utils.gen_access_key_secret_key(user_id)
            cmd = (
                "radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s"
                % (tenant_name, user_id, user_id, tenant_name, cluster_name))
            log.info("cmd to execute:\n%s" % cmd)
            variable = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
                                        shell=True)
            v = variable.stdout.read()
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details["user_id"] = v_as_json["subusers"][0]["id"]
            user_details["key"] = v_as_json["swift_keys"][0]["secret_key"]
            user_details["tenant"], _ = user_details["user_id"].split("$")
            user_info = basic_io_structure.user(
                **{
                    "user_id": user_details["user_id"],
                    "secret_key": user_details["key"],
                    "access_key": " ",
                })
            write_user_info.add_user_info(
                dict(user_info, **tenant_info.tenant(user_details["tenant"])))
            log.info("secret_key: %s" % user_details["key"])
            log.info("user_id: %s" % user_details["user_id"])
            log.info("tenant: %s" % user_details["tenant"])
            return user_details

        except subprocess.CalledProcessError as e:
            error = e.output + str(e.returncode)
            log.error(error)
            return False
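
The subuser's secret is a Swift key, so it is exercised through the Swift API rather than S3. A small sketch with python-swiftclient, assuming an RGW Swift auth endpoint of http://rgw-host:8080/auth/1.0 (host, port and the placeholder values are hypothetical):

from swiftclient.client import Connection

subuser = {"user_id": "tenant1$alice:swift", "key": "<swift-secret>"}  # as returned by create_subuser
conn = Connection(
    authurl="http://rgw-host:8080/auth/1.0",
    user=subuser["user_id"],
    key=subuser["key"],
    auth_version="1",
)
conn.put_container("demo-container")  # quick credential check
print(conn.get_account()[1])          # containers visible to the subuser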
Example #6
def test_exec(rgw_user_info_file, config):

    test_info = AddTestInfo('Test Basic IO on S3')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()

    try:

        test_info.started_info()

        with open(rgw_user_info_yaml, 'r') as f:
            rgw_user_info = yaml.safe_load(f)

        mount_point = rgw_user_info['nfs_mnt_point']

        nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file)

        mounted = nfs_ganesha.initialize(write_io_info=False)

        if mounted is False:
            raise TestExecError("mount failed")

        if nfs_ganesha.rgw_user_info[
                'nfs_version'] == 4 and nfs_ganesha.rgw_user_info[
                    'Pseudo'] is not None:
            log.info('nfs version: 4')
            log.info('adding Pseudo path to writable mount point')
            mount_point = os.path.join(mount_point,
                                       nfs_ganesha.rgw_user_info['Pseudo'])
            log.info('writable mount point with Pseudo: %s' % mount_point)

        log.info('authenticating rgw user')

        # authenticate

        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()

        # add user_info io_info yaml file

        user_info_add = basic_io_structure.user(**rgw_user_info)
        write_user_info.add_user_info(user_info_add)

        if config.io_op_config.get('create', None) is True:

            # create buckets

            for bc in range(config.bucket_count):

                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    rgw_user_info['user_id'], rand_no=bc)

                bucket = s3_reusables.create_bucket(bucket_name_to_create,
                                                    rgw_conn, rgw_user_info)

                # uploading data

                log.info('s3 objects to create: %s' % config.objects_count)

                for oc in range(config.objects_count):

                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)

                    s3_reusables.upload_object(s3_object_name, bucket,
                                               TEST_DATA_PATH, config,
                                               rgw_user_info)

            log.info('verification starts on NFS mount after %s seconds' %
                     SLEEP_TIME)

            time.sleep(SLEEP_TIME)

            read_io_info_on_nfs = ReadIOInfoOnNFS(mount_point)
            read_io_info_on_nfs.yaml_fname = 'io_info.yaml'
            read_io_info_on_nfs.initialize_verify_io()
            read_io_info_on_nfs.verify_if_basedir_created()
            read_io_info_on_nfs.verify_if_files_created()

            log.info('verification complete, data intact')

            created_buckets = read_io_info_on_nfs.base_dirs
            created_objects = read_io_info_on_nfs.files

            if config.io_op_config.get('delete', None) is True:

                log.info('delete operation starts')

                for bucket_name in created_buckets:

                    bucket = s3lib.resource_op({
                        'obj':
                        rgw_conn,
                        'resource':
                        'Bucket',
                        'args': [os.path.basename(bucket_name)]
                    })  # buckets are base dirs in NFS

                    objects = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'objects',
                        'args': None
                    })

                    log.info('deleting all objects in bucket')

                    objects_deleted = s3lib.resource_op({
                        'obj': objects,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('objects_deleted: %s' % objects_deleted)

                    if objects_deleted is False:
                        raise TestExecError(
                            'Resource execution failed: Object deletion failed'
                        )

                    if objects_deleted is not None:

                        response = HttpResponseParser(objects_deleted[0])

                        if response.status_code == 200:
                            log.info('objects deleted ')

                        else:
                            raise TestExecError("objects deletion failed")

                    else:
                        raise TestExecError("objects deletion failed")

                    log.info('deleting bucket: %s' % bucket.name)

                    bucket_deleted_status = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('bucket_deleted_status: %s' %
                             bucket_deleted_status)

                    if bucket_deleted_status is not None:

                        response = HttpResponseParser(bucket_deleted_status)

                        if response.status_code == 204:
                            log.info('bucket deleted ')

                        else:
                            raise TestExecError("bucket deletion failed")

                    else:
                        raise TestExecError("bucket deletion failed")

                log.info(
                    'verification on NFS will start after %s seconds for delete operation'
                    % SLEEP_TIME)

                time.sleep(200)

                for basedir in created_buckets:

                    exists = os.path.exists(basedir)

                    log.info('exists status: %s' % exists)

                    if exists is True:
                        raise TestExecError(
                            "Basedir: %s not deleted on NFS" % basedir)

                log.info('basedirs deleted')

                for each_file in created_objects:

                    log.info('verifying existence for: %s' % each_file['file'])

                    exists = os.path.exists(each_file['file'])

                    if exists:
                        raise TestExecError("files not created")

                    log.info('file deleted')

                log.info(
                    'verification of file deletion complete, files removed from NFS'
                )

            if config.io_op_config.get('move', None) is True:

                log.info('move operation starts')

                for each_file in created_objects:

                    # in s3 move operation is achieved by copying the same object with the new name and
                    #  deleting the old object

                    log.info('move operation for :%s' % each_file['file'])

                    new_obj_name = os.path.basename(
                        each_file['file']) + ".moved"

                    log.info('new file name: %s' % new_obj_name)

                    new_object = s3lib.resource_op({
                        'obj':
                        rgw_conn,
                        'resource':
                        'Object',
                        'args': [each_file['bucket'], new_obj_name],
                    })

                    new_object.copy_from(
                        CopySource='%s/%s' %
                        (each_file['bucket'],
                         os.path.basename(
                             each_file['file'])))  # old object name

                    old_object = s3lib.resource_op({
                        'obj':
                        rgw_conn,
                        'resource':
                        'Object',
                        'args': [
                            each_file['bucket'],
                            os.path.basename(each_file['file'])
                        ],
                    })
                    old_object.delete()

                    each_file['file'] = os.path.abspath(
                        os.path.join(mount_point, each_file['bucket'],
                                     new_obj_name))

                log.info(
                    'verification on NFS for move operation will start after %s seconds'
                    % SLEEP_TIME)
                time.sleep(SLEEP_TIME)

                read_io_info_on_nfs.verify_if_files_created()

                log.info('move completed, data intact')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #7
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    test_info = AddTestInfo('create m buckets')
    conf_path = '/etc/ceph/%s.conf' % config.cluster_name
    ceph_conf = CephConfOp(conf_path)
    rgw_service = RGWService()
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            user_info = basic_io_structure.user(
                **{
                    'user_id': each_user['user_id'],
                    'access_key': each_user['access_key'],
                    'secret_key': each_user['secret_key']
                })
            write_user_info.add_user_info(user_info)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            # enabling sharding
            if config.test_ops['sharding']['enable'] is True:
                log.info('enabling sharding on buckets')
                max_shards = config.test_ops['sharding']['max_shards']
                log.info('making changes to ceph.conf')
                ceph_conf.set_to_ceph_conf(
                    'global', ConfigOpts.rgw_override_bucket_index_max_shards,
                    max_shards)
                log.info('trying to restart services ')
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')
            # create buckets
            if config.test_ops['create_bucket'] is True:
                log.info('no of buckets to create: %s' % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name_to_create = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    log.info('creating bucket with name: %s' %
                             bucket_name_to_create)
                    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                    bucket = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Bucket',
                        'args': [bucket_name_to_create]
                    })
                    created = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'create',
                        'args': None,
                        'extra_info': {
                            'access_key': each_user['access_key']
                        }
                    })
                    if created is False:
                        raise TestExecError(
                            "Resource execution failed: bucket creation failed"
                        )
                    if created is not None:
                        response = HttpResponseParser(created)
                        if response.status_code == 200:
                            log.info('bucket created')
                        else:
                            raise TestExecError("bucket creation failed")
                    else:
                        raise TestExecError("bucket creation failed")
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s --cluster %s | grep bucket_id' \
                              % (bucket.name, config.cluster_name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace(
                            '"',
                            '').strip().split(":")[1].strip().replace(',', '')
                        cmd2 = 'rados -p default.rgw.buckets.index ls --cluster %s | grep %s' \
                               % (config.cluster_name, b_id)
                        out = utils.exec_shell_cmd(cmd2)
                        log.info(
                            'got output from sharding verification: %s' % out)
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #8
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])

    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)

    buckets_created = []

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc
            )
            log.info("creating bucket with name: %s" % bucket_name)
            bucket = reusable.create_bucket(
                bucket_name, s3_client_rgw, assumed_role_user_info
            )
            buckets_created.append(bucket)

        if config.test_ops["create_object"] is True:
            for bucket in buckets_created:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    if config.test_ops["server_side_copy"] is True:
        bucket1, bucket2 = buckets_created

        # copy object1 from bucket1 to bucket2 with the same name as in bucket1
        log.info("copying first object from bucket1 to bucket2")
        all_keys_in_buck1 = []
        for obj in bucket1.objects.all():
            all_keys_in_buck1.append(obj.key)
        copy_source = {"Bucket": bucket1.name, "Key": all_keys_in_buck1[0]}
        copy_object_name = all_keys_in_buck1[0] + "_copied_obj"
        log.info(f"copy object name: {copy_object_name}")
        bucket2.copy(copy_source, copy_object_name)

        # list the objects in bucket2
        log.info("listing all objects im bucket2 after copy")
        all_bucket2_objs = []
        for obj in bucket2.objects.all():
            log.info(obj.key)
            all_bucket2_objs.append(obj.key)

        # check for object existence in bucket2
        if copy_object_name in all_bucket2_objs:
            log.info("server side copy successful")
        else:
            raise TestExecError("server side copy operation was not successful")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #9
def test_exec(rgw_user_info_file, config):

    test_info = AddTestInfo("Test Basic IO on S3")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()

    try:
        test_info.started_info()
        with open(rgw_user_info_yaml, "r") as f:
            rgw_user_info = yaml.safe_load(f)
        mount_point = rgw_user_info["nfs_mnt_point"]
        nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file)
        mounted = nfs_ganesha.initialize(write_io_info=False)
        if mounted is False:
            raise TestExecError("mount failed")
        if (nfs_ganesha.rgw_user_info["nfs_version"] == 4
                and nfs_ganesha.rgw_user_info["Pseudo"] is not None):
            log.info("nfs version: 4")
            log.info("adding Pseudo path to writable mount point")
            mount_point = os.path.join(mount_point,
                                       nfs_ganesha.rgw_user_info["Pseudo"])
            log.info("writable mount point with Pseudo: %s" % mount_point)
        log.info("authenticating rgw user")

        # authenticate
        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()
        # add user_info io_info yaml file
        user_info_add = basic_io_structure.user(**rgw_user_info)
        write_user_info.add_user_info(user_info_add)
        if config.io_op_config.get("create", None) is True:
            # create buckets
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    rgw_user_info["user_id"], rand_no=bc)
                bucket = s3_reusables.create_bucket(bucket_name_to_create,
                                                    rgw_conn, rgw_user_info)
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc in range(config.objects_count):
                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)
                    config.obj_size = utils.get_file_size(
                        config.objects_size_range.get("min"),
                        config.objects_size_range.get("max"),
                    )
                    s3_reusables.upload_object(s3_object_name, bucket,
                                               TEST_DATA_PATH, config,
                                               rgw_user_info)
            log.info("verification Starts on NFS mount after %s seconds" %
                     SLEEP_TIME)
            time.sleep(SLEEP_TIME)
            read_io_info_on_nfs = ReadIOInfoOnNFS(mount_point)
            read_io_info_on_nfs.yaml_fname = "io_info.yaml"
            read_io_info_on_nfs.initialize_verify_io()
            read_io_info_on_nfs.verify_if_basedir_created()
            read_io_info_on_nfs.verify_if_files_created()
            log.info("verification complete, data intact")
            created_buckets = read_io_info_on_nfs.base_dirs
            created_objects = read_io_info_on_nfs.files
            if config.io_op_config.get("delete", None) is True:
                log.info("delete operation starts")
                for bucket_name in created_buckets:
                    bucket = s3lib.resource_op({
                        "obj":
                        rgw_conn,
                        "resource":
                        "Bucket",
                        "args": [os.path.basename(bucket_name)],
                    })  # buckets are base dirs in NFS
                    objects = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "objects",
                        "args": None
                    })
                    log.info("deleting all objects in bucket")
                    objects_deleted = s3lib.resource_op({
                        "obj": objects,
                        "resource": "delete",
                        "args": None
                    })
                    log.info("objects_deleted: %s" % objects_deleted)
                    if objects_deleted is False:
                        raise TestExecError(
                            "Resource execution failed: Object deletion failed"
                        )
                    if objects_deleted is not None:
                        response = HttpResponseParser(objects_deleted[0])
                        if response.status_code == 200:
                            log.info("objects deleted ")
                        else:
                            raise TestExecError("objects deletion failed")
                    else:
                        raise TestExecError("objects deletion failed")
                    log.info("deleting bucket: %s" % bucket.name)
                    bucket_deleted_status = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "delete",
                        "args": None
                    })
                    log.info("bucket_deleted_status: %s" %
                             bucket_deleted_status)
                    if bucket_deleted_status is not None:
                        response = HttpResponseParser(bucket_deleted_status)
                        if response.status_code == 204:
                            log.info("bucket deleted ")
                        else:
                            raise TestExecError("bucket deletion failed")
                    else:
                        raise TestExecError("bucket deletion failed")

                log.info(
                    "verification on NFS will start after %s seconds for delete operation"
                    % SLEEP_TIME)
                time.sleep(200)

                for basedir in created_buckets:
                    exists = os.path.exists(basedir)
                    log.info("exists status: %s" % exists)
                    if exists is True:
                        raise TestExecError(
                            "Basedir: %s not deleted on NFS" % basedir)
                log.info("basedirs deleted")
                for each_file in created_objects:
                    log.info("verifying existence for: %s" % each_file["file"])
                    exists = os.path.exists(each_file["file"])
                    if exists:
                        raise TestExecError("files not created")
                    log.info("file deleted")
                log.info(
                    "verification of file deletion complete, files removed from NFS"
                )

            if config.io_op_config.get("move", None) is True:
                log.info("move operation starts")
                for each_file in created_objects:
                    # in s3 move operation is achieved by copying the same object with the new name and
                    #  deleting the old object
                    log.info("move operation for :%s" % each_file["file"])
                    new_obj_name = os.path.basename(
                        each_file["file"]) + ".moved"
                    log.info("new file name: %s" % new_obj_name)
                    new_object = s3lib.resource_op({
                        "obj":
                        rgw_conn,
                        "resource":
                        "Object",
                        "args": [each_file["bucket"], new_obj_name],
                    })
                    new_object.copy_from(
                        CopySource="%s/%s" %
                        (each_file["bucket"],
                         os.path.basename(
                             each_file["file"])))  # old object name
                    old_object = s3lib.resource_op({
                        "obj":
                        rgw_conn,
                        "resource":
                        "Object",
                        "args": [
                            each_file["bucket"],
                            os.path.basename(each_file["file"]),
                        ],
                    })
                    old_object.delete()
                    each_file["file"] = os.path.abspath(
                        os.path.join(mount_point, each_file["bucket"],
                                     new_obj_name))
                log.info(
                    "verification on NFS for move operation will start after %s seconds"
                    % SLEEP_TIME)
                time.sleep(SLEEP_TIME)
                read_io_info_on_nfs.verify_if_files_created()
                log.info("move completed, data intact")

        test_info.success_status("test passed")
        sys.exit(0)

    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
Example #10
    def create_rest_admin_user(self,
                               user_id,
                               displayname,
                               cluster_name="ceph"):
        """
        Function to create a user with administrative capabilities

        To enable a user to exercise administrative functionality via the REST API

        Parameters:
            user_id (char): id of the user
            displayname (char): Display Name of the user
            cluster_name (char): Name of the ceph cluster. defaults to 'ceph'

        Returns:
            user details, which contain the following
                - user_id
                - display_name
                - access_key
                - secret_key
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            log.info("cluster name: %s" % cluster_name)
            cmd = (
                "radosgw-admin user create --uid=%s --display-name=%s --cluster %s"
                % (user_id, displayname, cluster_name))
            log.info("cmd to execute:\n%s" % cmd)
            variable = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
                                        shell=True)
            time.sleep(10)
            cmd = 'radosgw-admin caps add --uid=%s --caps="users=*" --cluster %s' % (
                user_id,
                cluster_name,
            )
            log.info("cmd to execute:\n%s" % cmd)
            variable = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
                                        shell=True)
            v = variable.stdout.read()
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details["user_id"] = v_as_json["user_id"]
            user_details["display_name"] = v_as_json["display_name"]
            user_details["access_key"] = v_as_json["keys"][0]["access_key"]
            user_details["secret_key"] = v_as_json["keys"][0]["secret_key"]
            user_info = basic_io_structure.user(
                **{
                    "user_id": user_details["user_id"],
                    "access_key": user_details["access_key"],
                    "secret_key": user_details["secret_key"],
                })
            write_user_info.add_user_info(user_info)
            log.info("access_key: %s" % user_details["access_key"])
            log.info("secret_key: %s" % user_details["secret_key"])
            log.info("user_id: %s" % user_details["user_id"])
            return user_details

        except subprocess.CalledProcessError as e:
            error = e.output + str(e.returncode)
            log.error(error)
            # traceback.print_exc(e)
            return False
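
The users=* capability added above is what lets this account drive the RGW Admin Ops REST API. A hedged sketch of querying it with requests and requests-aws4auth; the endpoint host/port, region string and placeholder keys are assumptions:

import requests
from requests_aws4auth import AWS4Auth

admin = {"user_id": "rest-admin", "access_key": "<access-key>", "secret_key": "<secret-key>"}  # as returned above
auth = AWS4Auth(admin["access_key"], admin["secret_key"], "us-east-1", "s3")
resp = requests.get(
    "http://rgw-host:8080/admin/user",  # hypothetical RGW endpoint
    params={"uid": admin["user_id"], "format": "json"},
    auth=auth,
)
print(resp.status_code, resp.json())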
Example #11
    def create_tenant_user(self,
                           tenant_name,
                           user_id,
                           displayname,
                           cluster_name="ceph"):
        """
        Function to create a user under a tenant.

        To create an S3-interface user under a tenant.

        Parameters:
            tenant_name (char): Name of the tenant
            user_id (char): id of the user
            displayname (char): Display Name of the user
            cluster_name (char): Name of the ceph cluster. defaults to 'ceph'

        Returns:
            user details, which contain the following
                - user_id
                - display_name
                - access_key
                - secret_key
                - tenant
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            tenant_info = TenantInfo()
            keys = utils.gen_access_key_secret_key(user_id)
            cmd = ('radosgw-admin --tenant %s --uid %s --display-name "%s" '
                   "--access_key %s --secret %s user create --cluster %s" % (
                       tenant_name,
                       user_id,
                       displayname,
                       keys["access_key"],
                       keys["secret_key"],
                       cluster_name,
                   ))
            log.info("cmd to execute:\n%s" % cmd)
            variable = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
                                        shell=True)
            v = variable.stdout.read()
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details["user_id"] = v_as_json["user_id"]
            user_details["display_name"] = v_as_json["display_name"]
            user_details["access_key"] = v_as_json["keys"][0]["access_key"]
            user_details["secret_key"] = v_as_json["keys"][0]["secret_key"]
            user_details["tenant"], user_details["user_id"] = user_details[
                "user_id"].split("$")
            user_info = basic_io_structure.user(
                **{
                    "user_id": user_details["user_id"],
                    "access_key": user_details["access_key"],
                    "secret_key": user_details["secret_key"],
                })
            write_user_info.add_user_info(
                dict(user_info, **tenant_info.tenant(user_details["tenant"])))
            log.info("access_key: %s" % user_details["access_key"])
            log.info("secret_key: %s" % user_details["secret_key"])
            log.info("user_id: %s" % user_details["user_id"])
            log.info("tenant: %s" % user_details["tenant"])
            return user_details

        except subprocess.CalledProcessError as e:
            error = e.output + str(e.returncode)
            log.error(error)
            return False
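
Because the tenant is split off from user_id before the details are returned, the account has to be re-qualified as tenant$uid when it is looked up again (the form the Ceph multitenancy docs use). A short sketch with placeholder names:

import json
import shlex
import subprocess

cmd = "radosgw-admin user info --uid=tenant1$alice --cluster ceph"
out = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE, check=True).stdout
print(json.loads(out)["user_id"])  # "tenant1$alice", matching the split above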
Example #12
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    test_info = AddTestInfo("create m buckets")
    conf_path = "/etc/ceph/%s.conf" % config.cluster_name
    ceph_conf = CephConfOp(conf_path)
    rgw_service = RGWService()
    try:
        test_info.started_info()
        # get user
        with open("user_details") as fout:
            all_users_info = simplejson.load(fout)
        for each_user in all_users_info:
            user_info = basic_io_structure.user(
                **{
                    "user_id": each_user["user_id"],
                    "access_key": each_user["access_key"],
                    "secret_key": each_user["secret_key"],
                })
            write_user_info.add_user_info(user_info)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            # enabling sharding
            if config.test_ops["sharding"]["enable"] is True:
                log.info("enabling sharding on buckets")
                max_shards = config.test_ops["sharding"]["max_shards"]
                log.info("making changes to ceph.conf")
                ceph_conf.set_to_ceph_conf(
                    "global",
                    ConfigOpts.rgw_override_bucket_index_max_shards,
                    max_shards,
                )
                log.info("trying to restart services ")
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info("RGW service restarted")
            # create buckets
            if config.test_ops["create_bucket"] is True:
                log.info("no of buckets to create: %s" % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name_to_create = utils.gen_bucket_name_from_userid(
                        each_user["user_id"], rand_no=bc)
                    log.info("creating bucket with name: %s" %
                             bucket_name_to_create)
                    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                    bucket = s3lib.resource_op({
                        "obj": rgw_conn,
                        "resource": "Bucket",
                        "args": [bucket_name_to_create],
                    })
                    created = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "create",
                        "args": None,
                        "extra_info": {
                            "access_key": each_user["access_key"]
                        },
                    })
                    if created is False:
                        raise TestExecError(
                            "Resource execution failed: bucket creation failed"
                        )
                    if created is not None:
                        response = HttpResponseParser(created)
                        if response.status_code == 200:
                            log.info("bucket created")
                        else:
                            raise TestExecError("bucket creation failed")
                    else:
                        raise TestExecError("bucket creation failed")
                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s --cluster %s | grep bucket_id"
                            % (bucket.name, config.cluster_name))
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (out.replace(
                            '"',
                            "").strip().split(":")[1].strip().replace(",", ""))
                        cmd2 = (
                            "rados -p default.rgw.buckets.index ls --cluster %s | grep %s"
                            % (config.cluster_name, b_id))
                        out = utils.exec_shell_cmd(cmd2)
                        log.info(
                            "got output from sharding verification: %s" % out)
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
Example #13
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    # Adding caps for user1
    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    # user1 auth with iam_client
    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    # policy document
    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])
    print(policy_document)

    # role policy
    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")
    print(role_policy)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    # role creation happens here
    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    # Put role policy happening here
    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    # bucket creation operations now
    bucket_name = "testbucket" + user1["user_id"]

    # authenticating user1 for bucket creation operation
    auth = Auth(user1, ssl=config.ssl)
    user1_info = {
        "access_key": user1["access_key"],
        "secret_key": user1["secret_key"],
        "user_id": user1["user_id"],
    }
    s3_client_u1 = auth.do_auth()

    # bucket creation operation
    bucket = reusable.create_bucket(bucket_name, s3_client_u1, user1_info)

    # uploading objects to the bucket
    if config.test_ops["create_object"]:
        # uploading data
        log.info("s3 objects to create: %s" % config.objects_count)
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
            log.info("s3 object name: %s" % s3_object_name)
            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
            log.info("s3 object path: %s" % s3_object_path)
            if config.test_ops.get("upload_type") == "multipart":
                log.info("upload type: multipart")
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )
            else:
                log.info("upload type: normal")
                reusable.upload_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )
    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }
    log.info("got the credentials after assume role")

    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client = s3client.do_auth_using_client()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)

    unexisting_object = bucket_name + "_unexisting_object"
    try:
        response = s3_client.head_object(Bucket=bucket_name, Key=unexisting_object)
    except botocore.exceptions.ClientError as e:
        response_code = e.response["Error"]["Code"]
        log.info(response_code)
        if e.response["Error"]["Code"] == "404":
            log.info("404 Unexisting Object Not Found")
        elif e.response["Error"]["Code"] == "403":
            raise TestExecError("Error code : 403 - HeadObject operation: Forbidden")
Example #14
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_sts_key,
                                     session_encryption_token)
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts,
                                     True)
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1)
    iam_client = auth.do_auth_iam_client()
    """
    TODO:
    policy_document and role_policy can be used valid dict types.
    need to explore on this. 
    """

    policy_document = (
        '{"Version":"2012-10-17",'
        '"Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/%s"]},'
        '"Action":["sts:AssumeRole"]}]}' % (user2["user_id"]))

    role_policy = ('{"Version":"2012-10-17",'
                   '"Statement":{"Effect":"Allow",'
                   '"Action":"s3:*",'
                   '"Resource":"arn:aws:s3:::*"}}')

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.
        format(user_id=user1["user_id"]))
    utils.exec_shell_cmd(add_caps_cmd)

    # log.info(policy_document)
    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        })
    write_user_info.add_user_info(user_info)

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            bucket = reusable.create_bucket(bucket_name_to_create,
                                            s3_client_rgw,
                                            assumed_role_user_info)
            if config.test_ops["create_object"] is True:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH,
                                                  s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")