Code example #1
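This test exercises S3 bucket lifecycle rules keyed on prefix filters: it shortens rgw_lc_debug_interval so lifecycle processing runs quickly, creates a bucket, uploads plain or versioned objects named after the rule prefixes, applies the lifecycle configuration, and validates the prefix rule (and, optionally, delete-marker handling). For reference, the prefix-extraction code below expects config.lifecycle_conf to hold rules in the standard S3 lifecycle format; a hypothetical minimal rule (the actual rules come from the test's configuration file) might look like:

# hypothetical example of a single lifecycle rule as read by the code below
lifecycle_conf = [
    {
        "ID": "LC_Rule_1",
        "Filter": {"Prefix": "key1"},
        "Status": "Enabled",
        "Expiration": {"Days": 1},
    }
]
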
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    _, version_name = utils.get_ceph_version()
    if "nautilus" in version_name:
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_max_worker,
                                   str(config.rgw_lc_max_worker))
    else:
        ceph_conf.set_to_ceph_conf(
            section=None,
            option=ConfigOpts.rgw_lc_max_worker,
            value=str(config.rgw_lc_max_worker),
        )
        ceph_conf.set_to_ceph_conf(section=None,
                                   option=ConfigOpts.rgw_lc_debug_interval,
                                   value="30")
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"],
                                                    rand_no=1)
    obj_list = []
    obj_tag = "suffix1=WMV1"
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    # collect the Prefix from each lifecycle rule filter (either top-level or inside an "And" block)
    prefix = [
        rule["Filter"].get("Prefix")
        or rule["Filter"]["And"].get("Prefix")
        for rule in config.lifecycle_conf
    ]
    prefix = prefix if prefix else ["dummy1"]
    if config.test_ops["enable_versioning"] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops["version_count"] > 0:
                    for vc in range(config.test_ops["version_count"]):
                        log.info("version count for %s is %s" %
                                 (s3_object_name, str(vc)))
                        log.info("modifying data: %s" % s3_object_name)
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            user_info,
                            append_data=True,
                            append_msg="hello object for version: %s\n" %
                            str(vc),
                        )
                else:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    reusable.upload_object(s3_object_name, bucket,
                                           TEST_DATA_PATH, config, user_info)

        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops["delete_marker"] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                                   life_cycle_rule_new, config)
    if config.test_ops["enable_versioning"] is False:
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Code example #2
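This test drives the Swift API through a tenanted subuser and, per container, covers one of four paths: container versioning via the X-Versions-Location header (optionally copying an archived version back), object expiry via an X-Delete-After header, large-object upload stitched together with an X-Object-Manifest header (optionally downloaded and md5-checked), or plain upload, download, modify, and delete of objects.
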
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    log.info(type(ceph_conf))
    rgw_service = RGWService()
    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_names[0],
                                                displayname=user_names[0])
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    auth = Auth(user_info)
    rgw = auth.do_auth()

    for cc in range(config.container_count):
        if config.version_enable is True:
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf("global",
                                       ConfigOpts.rgw_swift_versioning_enabled,
                                       "True")
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
            container_name_old = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=str(cc) + "old")
            log.info(container_name_old)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "kwargs": dict(container=container_name_old),
            })
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=str(cc) + "new")
            log.info(container_name)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [
                    container_name,
                    {"X-Versions-Location": container_name_old},
                ],
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            ls = []
            swift_object_name = ""
            for version_count in range(config.version_count):
                for oc, size in list(config.mapped_sizes.items()):
                    swift_object_name = fill_container(rgw, container_name,
                                                       user_names[0], oc, cc,
                                                       size)
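                # get_container() returns (headers, [object dicts]); older versions of overwritten
                # objects are archived in the X-Versions-Location container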
                ls = rgw.get_container(container_name_old)
                ls = list(ls)
            if config.copy_version_object is True:
                old_obj_name = ls[1][config.version_count - 2]["name"]
                log.info(old_obj_name)
                container = swiftlib.resource_op({
                    "obj": rgw,
                    "resource": "copy_object",
                    "kwargs": dict(
                        container=container_name_old,
                        obj=old_obj_name,
                        destination=container_name + "/" + swift_object_name,
                    ),
                })
                if container is False:
                    raise TestExecError("Resource execution failed")
                log.info("Successfully copied item")
            else:
                current_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format(
                    uid=user_names[0], tenant=tenant, bucket=container_name)
                num_obj_current = utils.exec_shell_cmd(current_count)
                num_obj_current = json.loads(num_obj_current)
                num_obj_current = (num_obj_current[0].get("usage").get(
                    "rgw.main").get("num_objects"))
                old_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format(
                    uid=user_names[0],
                    tenant=tenant,
                    bucket=container_name_old)
                num_obj_old = utils.exec_shell_cmd(old_count)
                num_obj_old = json.loads(num_obj_old)
                num_obj_old = (num_obj_old[0].get("usage").get("rgw.main").get(
                    "num_objects"))
                version_count_from_config = (
                    config.objects_count *
                    config.version_count) - config.objects_count
                if (num_obj_current == config.objects_count) and (
                        num_obj_old == version_count_from_config):
                    log.info("objects and versioned obbjects are correct")
                else:
                    test_info.failed_status("test failed")

        elif config.object_expire is True:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(
                    rgw,
                    container_name,
                    user_names[0],
                    oc,
                    cc,
                    size,
                    header={"X-Delete-After": 5},
                )
                time.sleep(7)
                container_exists = swiftlib.resource_op({
                    "obj": rgw,
                    "resource": "get_object",
                    "args": [container_name, swift_object_name],
                })
                log.info(container_exists)
                if container_exists:
                    msg = "test failed as the objects are still present"
                    test_info.failed_status(msg)
                    raise TestExecError(msg)

        elif config.large_object_upload is True:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(
                    rgw,
                    container_name,
                    user_names[0],
                    oc,
                    cc,
                    size,
                    multipart=True,
                    split_size=config.split_size,
                )
                container_name_new = utils.gen_bucket_name_from_userid(
                    user_info["user_id"], rand_no=str(cc) + "New")
                container = swiftlib.resource_op({
                    "obj": rgw,
                    "resource": "put_container",
                    "kwargs": dict(container=container_name_new),
                })
                if container is False:
                    raise TestExecError(
                        "Resource execution failed: container creation failed")
                container = swiftlib.resource_op({
                    "obj": rgw,
                    "resource": "put_object",
                    "kwargs": dict(
                        container=container_name_new,
                        obj=swift_object_name,
                        contents=None,
                        headers={
                            "X-Object-Manifest": container_name + "/" + swift_object_name + "/"
                        },
                    ),
                })
                if container is False:
                    raise TestExecError(
                        "Resource execution failed: container creation failed")
                if config.large_object_download is True:
                    swift_old_object_path = os.path.join(
                        TEST_DATA_PATH, swift_object_name)
                    swift_object_download_fname = swift_object_name + ".download"
                    log.info("download object name: %s" %
                             swift_object_download_fname)
                    swift_object_download_path = os.path.join(
                        TEST_DATA_PATH, swift_object_download_fname)
                    log.info("download object path: %s" %
                             swift_object_download_path)
                    swift_object_downloaded = rgw.get_object(
                        container_name_new, swift_object_name)
                    with open(swift_object_download_path, "wb") as fp:
                        fp.write(swift_object_downloaded[1])
                    old_object = utils.get_md5(swift_old_object_path)
                    downloaded_obj = utils.get_md5(swift_object_download_path)
                    log.info("s3_object_downloaded_md5: %s" % old_object)
                    log.info("s3_object_uploaded_md5: %s" % downloaded_obj)
                    if str(old_object) == str(downloaded_obj):
                        log.info("md5 match")
                        utils.exec_shell_cmd("rm -rf %s" %
                                             swift_object_download_path)
                    else:
                        raise TestExecError("md5 mismatch")

        else:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc)
            container = swiftlib.resource_op({
                "obj": rgw,
                "resource": "put_container",
                "args": [container_name]
            })
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed")
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(rgw, container_name,
                                                   user_names[0], oc, cc, size)
                # download object
                swift_object_download_fname = swift_object_name + ".download"
                log.info("download object name: %s" %
                         swift_object_download_fname)
                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)
                log.info("download object path: %s" %
                         swift_object_download_path)
                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)
                with open(swift_object_download_path, "w") as fp:
                    fp.write(str(swift_object_downloaded[1]))
                # modify and re-upload
                log.info("appending new message to test_data")
                message_to_append = "adding new msg after download"
                with open(swift_object_download_path, "a+") as fp:
                    fp.write(message_to_append)
                with open(swift_object_download_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
                # delete object
                log.info("deleting swift object")
                rgw.delete_object(container_name, swift_object_name)
            # delete container
            log.info("deleting swift container")
            rgw.delete_container(container_name)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    reusable.remove_user(tenant_user_info, tenant=tenant)
Code example #3
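This test measures bucket listing behaviour: it uploads objects in a flat or pseudo-directory layout (optionally encrypted, multipart, or versioned), optionally runs radosgw-admin bucket radoslist, reads the rgw_bucket_index_max_aio setting, and times ordered or unordered listings via radosgw-admin as well as listing via boto.
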
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops["create_object"] is True:
                    if config.test_ops["object_structure"] == "flat":
                        # uploading data
                        log.info("top level s3 objects to create: %s" %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info("s3 object name: %s" % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info("s3 object path: %s" % s3_object_path)
                            if config.test_ops.get(
                                    "upload_type") == "multipart":
                                log.info("upload type: multipart")
                                reusable.upload_mutipart_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            else:
                                log.info("upload type: normal")
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    "deleting local file created after the upload"
                                )
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops["object_structure"] == "pseudo":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info("s3 object name: %s" % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info("s3 object path: %s" % s3_object_path)
                                if config.test_ops.get(
                                        "upload_type") == "multipart":
                                    log.info("upload type: multipart")
                                    reusable.upload_mutipart_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                else:
                                    log.info("upload type: normal")
                                    reusable.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        "deleting local file created after the upload"
                                    )
                                    utils.exec_shell_cmd("rm -rf %s" %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops["create_object"] is False:
                    if config.test_ops[
                            "object_structure"] == "pseudo-dir-only":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count}"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops["radoslist"] is True:
                    log.info(
                        "executing the command radosgw-admin bucket radoslist "
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter - rgw_bucket_index_max_aio
                ceph_version_id, ceph_version_name = utils.get_ceph_version()
                if ceph_version_name in ["luminous", "nautilus"]:
                    cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.split()[1]
                else:
                    cmd = "ceph config get mon rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.rstrip("\n")

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][
                    "num_objects"]

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    "measure the execution time taken to list via radosgw-admin command"
                )
                if config.test_ops["radosgw_listing_ordered"] is True:
                    log.info("ordered listing via radosgw-admin command")
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "ordered")
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops["radosgw_listing_ordered"] is False:
                    log.info("unordered listing via radosgw-admin command")
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "unordered")
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info("measure the execution time taken to list via boto")
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins"
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265
        if config.radoslist_all is True:
            log.info(
                "Executing the command radosgw-admin bucket radoslist on all buckets"
            )
            cmd = "radosgw-admin bucket radoslist | grep ERROR"
            radoslist_all_error = utils.exec_shell_cmd(cmd)
            if radoslist_all_error:
                raise TestExecError("ERROR in radoslist command")

        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)
Code example #4
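An earlier variant of code example #1: it applies prefix-based lifecycle rules to a bucket with plain or versioned uploads, and uses object tagging on the non-versioned path before validating the lifecycle rules.
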
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info('making changes to ceph.conf')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval))
    ceph_version = utils.exec_shell_cmd("ceph version")
    if 'nautilus' in ceph_version.split():
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_max_worker, str(config.rgw_lc_max_worker))
    log.info('trying to restart services')
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')

    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1)
    obj_list = []
    obj_tag = 'suffix1=WMV1'
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    # collect the Prefix from each lifecycle rule filter (either top-level or inside an 'And' block)
    prefix = [rule['Filter'].get('Prefix') or
              rule['Filter']['And'].get('Prefix')
              for rule in config.lifecycle_conf]
    prefix = prefix if prefix else ['dummy1']
    if config.test_ops['enable_versioning'] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info)
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops['version_count'] > 0:
                    for vc in range(config.test_ops['version_count']):
                        log.info('version count for %s is %s' % (s3_object_name, str(vc)))
                        log.info('modifying data: %s' % s3_object_name)
                        reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info,
                                               append_data=True,
                                               append_msg='hello object for version: %s\n' % str(vc))
                else:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)

        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops['delete_marker'] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config)
    if config.test_ops['enable_versioning'] is False:
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
Code example #5
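This test covers garbage-collection verification: it uploads normal or multipart objects and, when gc_verification is enabled, lowers rgw_gc_obj_min_wait to 5 seconds, downloads the large object again so shadow entries show up in the GC list, runs radosgw-admin gc process, and confirms the object still downloads without a 404 NoSuchKey error.
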
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info('no of buckets to create: %s' % config.bucket_count)
    # create buckets
    if config.test_ops['create_bucket'] is True:
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, user_info)
            if config.test_ops['create_object'] is True:
                # uploading data
                log.info('s3 objects to create: %s' % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, oc)
                    log.info('s3 object name: %s' % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    log.info('s3 object path: %s' % s3_object_path)
                    if config.test_ops.get('upload_type') == 'multipart':
                        log.info('upload type: multipart')
                        reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                                        user_info)
                    else:
                        log.info('upload type: normal')
                        reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)

                    if config.gc_verification is True:
                        log.info('making changes to ceph.conf')
                        config.rgw_gc_obj_min_wait = 5
                        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,
                                                   str(config.rgw_gc_obj_min_wait))
                        log.info('trying to restart services')
                        srv_restarted = rgw_service.restart()
                        time.sleep(30)
                        if srv_restarted is False:
                            raise TestExecError("RGW service restart failed")
                        else:
                            log.info('RGW service restarted')
                        log.info('download the large object again to populate gc list with shadow entries')
                        reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config)
                        time.sleep(60)
                        gc_list_output = json.loads(utils.exec_shell_cmd("radosgw-admin gc list --include-all"))

                        log.info(gc_list_output)
                        
                        if gc_list_output:
                            log.info("Shadow objects found after setting the rgw_gc_obj_min_wait to 5 seconds")
                            utils.exec_shell_cmd("radosgw-admin gc process")
                            log.info('Object download should not error out in 404 NoSuchKey error')
                            reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config)

        reusable.remove_user(user_info)
Code example #6
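This test combines dynamic resharding settings with versioned objects: it sets rgw_max_objs_per_shard, rgw_dynamic_resharding and rgw_gc_obj_min_wait, uploads plain or versioned objects per bucket, and can then list the object versions and delete both the versions and the bucket.
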
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    objects_created_list = []

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    # making changes to max_objects_per_shard and rgw_gc_obj_min_wait to ceph.conf
    log.info('making changes to ceph.conf')
    log.info(f'rgw_max_objs_per_shard parameter set to {config.max_objects_per_shard}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, 'True')
    log.info(f'rgw gc obj min wait configuration parameter set to {config.rgw_gc_obj_min_wait}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait, str(config.rgw_gc_obj_min_wait))
    sleep_time = 10
    log.info(f'Restarting RGW service and waiting for {sleep_time} seconds')
    srv_restarted = rgw_service.restart()
    time.sleep(sleep_time)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                log.info(f'creating bucket number {bc}')
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('enable_version', False):
                            log.info('upload versioned objects')
                            reusable.upload_version_object(config, each_user, rgw_conn, s3_object_name,
                                                           config.obj_size, bucket, TEST_DATA_PATH)
                        else:
                            log.info('upload type: normal')
                            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                        objects_created_list.append((s3_object_name, s3_object_path))
                #deleting the local file created after upload
                if config.local_file_delete is True:
                    log.info('deleting local file created after the upload')
                    utils.exec_shell_cmd('rm -rf %s' % s3_object_path)

                # listing the objects         
                if config.test_ops.get('list_objects', False):
                    if config.test_ops.get('enable_version', False):
                        for name,path in objects_created_list:
                            reusable.list_versioned_objects(bucket,name,path,rgw_conn)
                    else:
                        reusable.list_objects(bucket)
                
                if config.test_ops.get('delete_bucket_object', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            print("name, path",name,path)
                            versions = bucket.object_versions.filter(Prefix=name)
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, name]})
                            log.info('deleting versions for s3 obj: %s' % name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        reusable.delete_version_object(bucket,version.version_id, path, rgw_conn, each_user)
                                    else:
                                        raise TestExecError("version  deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                    else:
                        reusable.delete_objects(bucket)
                    log.info(f'deleting the bucket {bucket_name_to_create}')
                    reusable.delete_bucket(bucket)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")

    # remove the user
    reusable.remove_user(each_user)
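Code example #7
This test verifies lifecycle-driven object expiration: with rgw_lc_debug_interval set to 1 it uploads multipart objects matching the rule prefix, polls bucket stats until the objects expire, processes the GC queue, and checks that any entries still visible in the data pool are no longer downloadable.
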
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 1
    config.rgw_lifecycle_work_time = "00:00-23:59"
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    rgw_service.status()
    # create user
    user_info = s3lib.create_users(config.user_count)
    for each_user in user_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        if config.test_ops["create_bucket"]:
            log.info("no of buckets to create: %s" % config.bucket_count)
            # create bucket
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=1)
                bucket = reusable.create_bucket(bucket_name, rgw_conn,
                                                each_user)
                life_cycle_rule = {"Rules": config.lifecycle_conf}
                reusable.put_bucket_lifecycle(bucket, rgw_conn, rgw_conn2,
                                              life_cycle_rule)
                if config.test_ops["create_object"]:
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        log.info(
                            f"s3 objects to create of size {config.obj_size}")
                        s3_object_name = config.lifecycle_conf[0]["Filter"][
                            "Prefix"] + str(oc)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info(
                            f"s3 object path: {s3_object_path}, name: {s3_object_name}"
                        )
                        reusable.upload_mutipart_object(
                            s3_object_name, bucket, TEST_DATA_PATH, config,
                            each_user)

                # poll bucket stats until the lifecycle rule has expired every object
                for i in range(1, 100):
                    time.sleep(60)
                    bucket_details = json.loads(
                        utils.exec_shell_cmd(
                            f"radosgw-admin bucket stats --bucket={bucket.name}"
                        ))
                    if bucket_details["usage"]["rgw.main"]["num_objects"] == 0:
                        break
                else:
                    raise TestExecError(
                        "Bucket object expiration taking longer than expected")

                gc_list_output = json.loads(
                    utils.exec_shell_cmd(
                        "radosgw-admin gc list --include-all"))
                if gc_list_output:
                    log.info("Removing shadow objects found")
                    utils.exec_shell_cmd(
                        "radosgw-admin gc process --include-all")

                bucket_id = (bucket_details["id"] + "_" +
                             config.lifecycle_conf[0]["Filter"]["Prefix"])
                log.info(
                    f"check for all the entry {bucket_id} for the bucket in data pool"
                )
                obj_pool = utils.exec_shell_cmd(
                    f"rados ls -p default.rgw.buckets.data | grep {bucket_id}")
                if obj_pool:
                    # exec_shell_cmd returns raw stdout, so iterate over it line by line
                    for obj in obj_pool.splitlines():
                        object_name = obj.split("_")[-1]
                        log.info(f"s3 object name to download: {object_name}")
                        object_name_downloaded = object_name + "." + "download"
                        object_download_path = os.path.join(
                            TEST_DATA_PATH, object_name_downloaded)
                        object_downloaded_status = s3lib.resource_op({
                            "obj": bucket,
                            "resource": "download_file",
                            "args": [object_name, object_download_path],
                        })
                        if object_downloaded_status is False:
                            log.info("As expected object is not Downloadable")
                        if object_downloaded_status is None:
                            raise TestExecError(
                                "Objects are not listed but can be downloadable"
                            )

                if config.local_file_delete:
                    log.info("deleting local file created after the upload")
                    utils.exec_shell_cmd(f"rm -rf {TEST_DATA_PATH}")

                reusable.delete_bucket(bucket)
        reusable.remove_user(each_user)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Code example #8
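A close variant of code example #3: it lists buckets containing flat or pseudo-directory object layouts, runs radosgw-admin bucket radoslist, and measures listing time against the configured rgw_bucket_index_max_aio.
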
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    if config.test_ops['object_structure'] == 'flat':
                        # uploading data
                        log.info('top level s3 objects to create: %s' %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info('s3 object name: %s' % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info('s3 object path: %s' % s3_object_path)
                            if config.test_ops.get(
                                    'upload_type') == 'multipart':
                                log.info('upload type: multipart')
                                reusable.upload_mutipart_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                            else:
                                log.info('upload type: normal')
                                reusable.upload_object(s3_object_name, bucket,
                                                       TEST_DATA_PATH, config,
                                                       each_user)
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            #deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    'deleting local file created after the upload'
                                )
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_path)

                    #this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops['object_structure'] == 'pseudo':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info('s3 object name: %s' % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info('s3 object path: %s' % s3_object_path)
                                if config.test_ops.get(
                                        'upload_type') == 'multipart':
                                    log.info('upload type: multipart')
                                    reusable.upload_mutipart_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                else:
                                    log.info('upload type: normal')
                                    reusable.upload_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        'deleting local file created after the upload'
                                    )
                                    utils.exec_shell_cmd('rm -rf %s' %
                                                         s3_object_path)

                # listing a bucket with only pseudo directories; "Bug allows ordered bucket listing to get stuck -- 4.1" https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
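                # with create_object disabled, only directory-like keys are created
                # via utils.create_psuedo_dir, so the listings below run against a
                # bucket containing prefixes but no regular objects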
                if config.test_ops['create_object'] is False:
                    if config.test_ops[
                            'object_structure'] == 'pseudo-dir-only':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count}'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
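                # 'radosgw-admin bucket radoslist' dumps the rados objects backing the
                # bucket; utils.exec_shell_cmd returns False when the command fails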
                if config.test_ops['radoslist'] is True:
                    log.info(
                        'executing the command: radosgw-admin bucket radoslist')
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the rgw_bucket_index_max_aio configuration parameter from the running rgw daemon
                cmd = 'ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio'
                max_aio_output = utils.exec_shell_cmd(cmd)
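                # 'config show | grep rgw_bucket_index_max_aio' is expected to print a
                # JSON line like '"rgw_bucket_index_max_aio": "128",', so split()[1]
                # picks up the quoted value token, which is reported in the logs below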
                max_aio = max_aio_output.split()[1]

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
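                # the 'rgw.main' usage category tracks the bucket's regular objects;
                # its num_objects count is reported alongside the listing timings below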
                bkt_num_objects = bucket_stats_json['usage']['rgw.main'][
                    'num_objects']

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    'measure the execution time taken to list via radosgw-admin command'
                )
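                # time_to_list_via_radosgw is expected to return the elapsed time in
                # seconds; a non-positive value is treated as a listing failure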
                if config.test_ops['radosgw_listing_ordered'] is True:
                    log.info('ordered listing via radosgw-admin command')
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'ordered')
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops['radosgw_listing_ordered'] is False:
                    log.info('unordered listing via radosgw-admin command')
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'unordered')
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info('measure the execution time taken to list via boto')
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f'with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins'
                    )
                else:
                    raise TestExecError("object listing via boto failed")

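        # cleanup: remove the uploaded objects (per version when versioning was
        # enabled) and then delete the bucket itself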
        if config.test_ops.get('delete_bucket_object', False):
            if config.test_ops.get('enable_version', False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)