def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    log.info('starting IO')
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info('sharding configuration will be added now.')
    if config.sharding_type == 'dynamic':
        log.info('sharding type is dynamic')
        # for dynamic resharding,
        # the number of shards should be greater than [ (no of objects) / (max objects per shard) ]
        # example: objects = 500 ; max objects per shard = 10
        # then the number of shards should be at least 50
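        # note (added for clarity): rgw_max_objs_per_shard sets the per-shard
        # object threshold and rgw_dynamic_resharding turns on the background
        # reshard process; both are written to ceph.conf below and the RGW
        # service is restarted so that they take effect.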
        time.sleep(15)
        log.info('making changes to ceph.conf')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard,
                                   str(config.max_objects_per_shard))
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding,
                                   'True')
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info('num_shards_expected: %s' % num_shards_expected)
        log.info('trying to restart services ')
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    config.bucket_count = 1
    objects_created_list = []
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'],
                                                    rand_no=1)
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    if config.test_ops.get('enable_version', False):
        log.info('enable bucket version')
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
    log.info('s3 objects to create: %s' % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
        if config.test_ops.get('enable_version', False):
            reusable.upload_version_object(config, user_info, rgw_conn,
                                           s3_object_name, config.obj_size,
                                           bucket, TEST_DATA_PATH)
        else:
            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                   config, user_info)
        objects_created_list.append((s3_object_name, s3_object_path))

    if config.sharding_type == 'manual':
        log.info('sharding type is manual')
        # for manual resharding,
        # the number of shards will be the value passed in the reshard command.
        time.sleep(15)
        log.info('in manual sharding')
        cmd_exec = utils.exec_shell_cmd(
            'radosgw-admin bucket reshard --bucket=%s --num-shards=%s '
            '--yes-i-really-mean-it' % (bucket.name, config.shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    sleep_time = 600
    log.info(f'verification starts after waiting for {sleep_time} seconds')
    time.sleep(sleep_time)
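    # dynamic resharding runs in the background, so the long wait above gives
    # RGW time to finish resharding before the shard count is read back from
    # the bucket instance metadata.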
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" %
                              bucket.name)
    json_doc = json.loads(op)
    bucket_id = json_doc['data']['bucket']['bucket_id']
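    # the shard count lives in the bucket instance metadata, which is keyed by
    # "<bucket name>:<bucket id>", so the bucket metadata is fetched first to
    # obtain the bucket_id.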
    op2 = utils.exec_shell_cmd(
        "radosgw-admin metadata get bucket.instance:%s:%s" %
        (bucket.name, bucket_id))
    json_doc2 = json.loads(op2)
    num_shards_created = json_doc2['data']['bucket_info']['num_shards']
    log.info('no_of_shards_created: %s' % num_shards_created)
    if config.sharding_type == 'manual':
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info('Expected number of shards created')
    if config.sharding_type == 'dynamic':
        log.info('Verify if resharding list is empty')
        reshard_list_op = json.loads(
            utils.exec_shell_cmd("radosgw-admin reshard list"))
        if not reshard_list_op:
            log.info(
                'for dynamic resharding, the number of shards created should be greater than or equal to the number of expected shards'
            )
            log.info('no_of_shards_expected: %s' % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')
            else:
                raise TestExecError('Expected number of shards not created')
        else:
            raise TestExecError(
                'reshard list is not empty, dynamic resharding did not complete')

    if config.test_ops.get('delete_bucket_object', False):
        if config.test_ops.get('enable_version', False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn,
                                               user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # check the default data log backing
    default_data_log = reusable.get_default_datalog_type()
    log.info(f"{default_data_log} is the default data log backing")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        objects_created_list = []

        # change the default datalog backing to FIFO
        if config.test_ops.get("change_datalog_backing", False):
            logtype = config.test_ops["change_datalog_backing"]
            log.info(f"change default datalog backing to {logtype}")
            cmd = f"radosgw-admin datalog type --log-type={logtype}"
            change_datalog_type = utils.exec_shell_cmd(cmd)
            if change_datalog_type is False:
                raise TestExecError(
                    f"Failed to change the datalog type to {logtype}")
            log.info(
                "restart the rgw daemons and sleep 30secs for the daemons to come up"
            )
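            # note (added for context): the datalog backing can be 'omap' or
            # 'fifo'; the restart below ensures every RGW daemon picks up the
            # new log type before any IO is generated.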
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(
                        bucket, rgw_conn, each_user, write_bucket_io_info
                    )
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info(
                        "top level s3 objects to create: %s" % config.objects_count
                    )
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        objects_created_list.append((s3_object_name, s3_object_path))
                        # deleting the local file created after upload
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)

        # delete  object and bucket
        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(
                        bucket, name, path, rgw_conn, each_user
                    )
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check for any ERRORs in datalog list. ref- https://bugzilla.redhat.com/show_bug.cgi?id=1917687
    error_in_data_log_list = reusable.check_datalog_list()
    if error_in_data_log_list:
        raise TestExecError("Error in datalog list")

    # check for data log markers. ref: https://bugzilla.redhat.com/show_bug.cgi?id=1831798#c22
    data_log_marker = reusable.check_datalog_marker()
    log.info(f"The data_log_marker is: {data_log_marker}")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()

        # authenticate sns client.
        rgw_sns_conn = auth.do_auth_sns_client()

        # authenticate with s3 client
        rgw_s3_client = auth.do_auth_using_client()

        # get ceph version
        ceph_version_id, ceph_version_name = utils.get_ceph_version()
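        # note (added for context): the ceph version is fetched because the
        # notification helpers below (get_topic, verify_event_record) take it
        # as an argument, presumably to adjust their checks per release.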

        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)

                # create topic with endpoint
                if config.test_ops["create_topic"] is True:
                    endpoint = config.test_ops.get("endpoint")
                    ack_type = config.test_ops.get("ack_type")
                    topic_id = str(uuid.uuid4().hex[:16])
                    persistent = False
                    topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id
                    log.info(
                        f"creating a topic with {endpoint} endpoint with ack type {ack_type}"
                    )
                    if config.test_ops.get("persistent_flag", False):
                        log.info("topic with peristent flag enabled")
                        persistent = config.test_ops.get("persistent_flag")
                    topic = notification.create_topic(rgw_sns_conn, endpoint,
                                                      ack_type, topic_name,
                                                      persistent)
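                    # note (added for context): a persistent topic queues
                    # notifications at RGW and retries delivery on failure,
                    # while a non-persistent topic delivers them synchronously
                    # as part of the request; ack_type maps to the kafka ack
                    # level expected before delivery is considered complete.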

                # get topic attributes
                if config.test_ops.get("get_topic_info", False):
                    log.info("get topic attributes")
                    get_topic_info = notification.get_topic(
                        rgw_sns_conn, topic, ceph_version_name)

                # put bucket notification with topic configured for event
                if config.test_ops["put_get_bucket_notification"] is True:
                    event = config.test_ops.get("event_type")
                    notification_name = "notification-" + str(event)
                    notification.put_bucket_notification(
                        rgw_s3_client,
                        bucket_name_to_create,
                        notification_name,
                        topic,
                        event,
                    )

                    # get bucket notification
                    log.info(
                        f"get bucket notification for bucket : {bucket_name_to_create}"
                    )
                    notification.get_bucket_notification(
                        rgw_s3_client, bucket_name_to_create)

                # create objects
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                # copy objects
                if config.test_ops.get("copy_object", False):
                    log.info("copy object")
                    status = rgw_s3_client.copy_object(
                        Bucket=bucket_name_to_create,
                        Key="copy_of_object" + s3_object_name,
                        CopySource={
                            "Bucket": bucket_name_to_create,
                            "Key": s3_object_name,
                        },
                    )
                    if status is None:
                        raise TestExecError("copy object failed")

            # delete objects
            if config.test_ops.get("delete_bucket_object", False):
                if config.test_ops.get("enable_version", False):
                    for name, path in objects_created_list:
                        reusable.delete_version_object(bucket, name, path,
                                                       rgw_conn, each_user)
                else:
                    reusable.delete_objects(bucket)

            # start kafka broker and consumer
            event_record_path = "/home/cephuser/event_record"
            start_consumer = notification.start_kafka_broker_consumer(
                topic_name, event_record_path)
            if start_consumer is False:
                raise TestExecError("Kafka consumer not running")

            # verify all the attributes of the event record. if event not received abort testcase
            log.info("verify event record attributes")
            verify = notification.verify_event_record(event,
                                                      bucket_name_to_create,
                                                      event_record_path,
                                                      ceph_version_name)
            if verify is False:
                raise EventRecordDataError(
                    "Event record is empty! notification is not seen")

        # delete topic logs on kafka broker
        notification.del_topic_from_kafka_broker(topic_name)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops["create_object"] is True:
                    if config.test_ops["object_structure"] == "flat":
                        # uploading data
                        log.info("top level s3 objects to create: %s" %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info("s3 object name: %s" % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info("s3 object path: %s" % s3_object_path)
                            if config.test_ops.get(
                                    "upload_type") == "multipart":
                                log.info("upload type: multipart")
                                reusable.upload_mutipart_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            else:
                                log.info("upload type: normal")
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    "deleting local file created after the upload"
                                )
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops["object_structure"] == "pseudo":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info("s3 object name: %s" % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info("s3 object path: %s" % s3_object_path)
                                if config.test_ops.get(
                                        "upload_type") == "multipart":
                                    log.info("upload type: multipart")
                                    reusable.upload_mutipart_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                else:
                                    log.info("upload type: normal")
                                    reusable.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        "deleting local file created after the upload"
                                    )
                                    utils.exec_shell_cmd("rm -rf %s" %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops["create_object"] is False:
                    if config.test_ops[
                            "object_structure"] == "pseudo-dir-only":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count}"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops["radoslist"] is True:
                    log.info(
                        "executing the command radosgw-admin bucket radoslist "
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter - rgw_bucket_index_max_aio
                ceph_version_id, ceph_version_name = utils.get_ceph_version()
                if ceph_version_name in ["luminous", "nautilus"]:
                    cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.split()[1]
                else:
                    cmd = "ceph config get mon rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.rstrip("\n")
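                # note (added for context): rgw_bucket_index_max_aio caps the
                # number of concurrent bucket-index operations RGW issues; it
                # is captured here only so the listing timings logged below can
                # be correlated with its value.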

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][
                    "num_objects"]

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    "measure the execution time taken to list via radosgw-admin command"
                )
                if config.test_ops["radosgw_listing_ordered"] is True:
                    log.info("ordered listing via radosgw-admin command")
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "ordered")
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops["radosgw_listing_ordered"] is False:
                    log.info("unordered listing via radosgw-admin command")
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "unordered")
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info("measure the execution time taken to list via boto")
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins"
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265
        if config.radoslist_all is True:
            log.info(
                "Executing the command radosgw-admin bucket radoslist on all buckets"
            )
            cmd = "radosgw-admin bucket radoslist | grep ERROR"
            radoslist_all_error = utils.exec_shell_cmd(cmd)
            if radoslist_all_error:
                raise TestExecError("ERROR in radoslist command")

        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    extra_user = s3lib.create_users(1)[0]
    extra_user_auth = Auth(extra_user, ssl=config.ssl)
    extra_user_conn = extra_user_auth.do_auth()
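    # the extra user is used later to verify that a non-owner cannot delete
    # object versions or suspend versioning on another user's bucket (see the
    # delete_from_extra_user and suspend_from_extra_user test_ops).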
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        s3_object_names = []
        # create buckets
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                each_user["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = s3lib.resource_op({
                "obj": rgw_conn,
                "resource": "Bucket",
                "args": [bucket_name_to_create]
            })
            # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
            created = s3lib.resource_op({
                "obj": bucket,
                "resource": "create",
                "args": None,
                "extra_info": {
                    "access_key": each_user["access_key"]
                },
            })
            if created is False:
                raise TestExecError(
                    "Resource execution failed: bucket creation faield")
            if created is not None:
                response = HttpResponseParser(created)
                if response.status_code == 200:
                    log.info("bucket created")
                else:
                    raise TestExecError("bucket creation failed")
            else:
                raise TestExecError("bucket creation failed")
            # getting bucket version object
            if config.test_ops["enable_version"] is True:
                log.info("bucket versionig test on bucket: %s" % bucket.name)
                # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                bucket_versioning = s3lib.resource_op({
                    "obj": rgw_conn,
                    "resource": "BucketVersioning",
                    "args": [bucket.name],
                })
                # checking the versioning status
                # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                version_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "status",
                    "args": None
                })
                if version_status is None:
                    log.info("bucket versioning still not enabled")
                # enabling bucket versioning
                # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                version_enable_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "enable",
                    "args": None,
                })
                response = HttpResponseParser(version_enable_status)
                if response.status_code == 200:
                    log.info("version enabled")
                    write_bucket_io_info.add_versioning_status(
                        each_user["access_key"],
                        bucket.name,
                        VERSIONING_STATUS["ENABLED"],
                    )

                else:
                    raise TestExecError("version enable failed")
                if config.objects_count > 0:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, s3_object_size in list(
                            config.mapped_sizes.items()):
                        # versioning upload
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, str(oc))
                        s3_object_names.append(s3_object_name)
                        log.info("s3 object name: %s" % s3_object_name)
                        log.info("versioning count: %s" % config.version_count)
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, str(oc))
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        original_data_info = manage_data.io_generator(
                            s3_object_path, s3_object_size)
                        if original_data_info is False:
                            TestExecError("data creation failed")
                        created_versions_count = 0
                        for vc in range(config.version_count):
                            log.info("version count for %s is %s" %
                                     (s3_object_name, str(vc)))
                            log.info("modifying data: %s" % s3_object_name)
                            modified_data_info = manage_data.io_generator(
                                s3_object_path,
                                s3_object_size,
                                op="append",
                                **{
                                    "message":
                                    "\nhello for version: %s\n" % str(vc)
                                })
                            if modified_data_info is False:
                                TestExecError("data modification failed")
                            log.info("uploading s3 object: %s" %
                                     s3_object_path)
                            upload_info = dict(
                                {
                                    "access_key":
                                    each_user["access_key"],
                                    "versioning_status":
                                    VERSIONING_STATUS["ENABLED"],
                                    "version_count_no":
                                    vc,
                                }, **modified_data_info)
                            s3_obj = s3lib.resource_op({
                                "obj":
                                bucket,
                                "resource":
                                "Object",
                                "args": [s3_object_name],
                                "extra_info":
                                upload_info,
                            })
                            object_uploaded_status = s3lib.resource_op({
                                "obj":
                                s3_obj,
                                "resource":
                                "upload_file",
                                "args": [modified_data_info["name"]],
                                "extra_info":
                                upload_info,
                            })
                            if object_uploaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object upload failed"
                                )
                            if object_uploaded_status is None:
                                log.info("object uploaded")
                                s3_obj = rgw_conn.Object(
                                    bucket.name, s3_object_name)
                                log.info("current_version_id: %s" %
                                         s3_obj.version_id)
                                key_version_info = basic_io_structure.version_info(
                                    **{
                                        "version_id": s3_obj.version_id,
                                        "md5_local": upload_info["md5"],
                                        "count_no": vc,
                                        "size": upload_info["size"],
                                    })
                                log.info("key_version_info: %s" %
                                         key_version_info)
                                write_key_io_info.add_versioning_info(
                                    each_user["access_key"],
                                    bucket.name,
                                    s3_object_path,
                                    key_version_info,
                                )
                                created_versions_count += 1
                                log.info("created_versions_count: %s" %
                                         created_versions_count)
                                log.info("adding metadata")
                                metadata1 = {
                                    "m_data1": "this is the meta1 for this obj"
                                }
                                s3_obj.metadata.update(metadata1)
                                metadata2 = {
                                    "m_data2": "this is the meta2 for this obj"
                                }
                                s3_obj.metadata.update(metadata2)
                                log.info("metadata for this object: %s" %
                                         s3_obj.metadata)
                                log.info("metadata count for object: %s" %
                                         (len(s3_obj.metadata)))
                                if not s3_obj.metadata:
                                    raise TestExecError(
                                        "metadata not created even adding metadata"
                                    )
                                versions = bucket.object_versions.filter(
                                    Prefix=s3_object_name)
                                created_versions_count_from_s3 = len(
                                    [v.version_id for v in versions])
                                log.info("created versions count on s3: %s" %
                                         created_versions_count_from_s3)
                                if (created_versions_count ==
                                        created_versions_count_from_s3):
                                    log.info(
                                        "no new versions are created when added metdata"
                                    )
                                else:
                                    raise TestExecError(
                                        "version count missmatch, "
                                        "possible creation of version on adding metadata"
                                    )
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name + ".download")
                            object_downloaded_status = s3lib.resource_op({
                                "obj":
                                bucket,
                                "resource":
                                "download_file",
                                "args":
                                [s3_object_name, s3_object_download_path],
                            })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            # checking md5 of the downloaded file
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            log.info("downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("uploaded_md5: %s" %
                                     modified_data_info["md5"])
                            # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path)
                        log.info("all versions for the object: %s\n" %
                                 s3_object_name)
                        versions = bucket.object_versions.filter(
                            Prefix=s3_object_name)
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s" %
                                     (version.object_key, version.version_id))
                        if config.test_ops.get("set_acl", None) is True:
                            s3_obj_acl = s3lib.resource_op({
                                "obj":
                                rgw_conn,
                                "resource":
                                "ObjectAcl",
                                "args": [bucket.name, s3_object_name],
                            })
                            # setting acl to private, just need to set to any acl and
                            # check if its set - check by response code
                            acls_set_status = s3_obj_acl.put(ACL="private")
                            response = HttpResponseParser(acls_set_status)
                            if response.status_code == 200:
                                log.info("ACLs set")
                            else:
                                raise TestExecError("Acls not Set")
                            # get obj details based on version id
                            for version in versions:
                                log.info("getting info for version id: %s" %
                                         version.version_id)
                                obj = s3lib.resource_op({
                                    "obj":
                                    rgw_conn,
                                    "resource":
                                    "Object",
                                    "args": [bucket.name, s3_object_name],
                                })
                                log.info(
                                    "obj get detils :%s\n" %
                                    (obj.get(VersionId=version.version_id)))
                        if config.test_ops["copy_to_version"] is True:
                            # reverting object to one of the versions ( randomly chosen )
                            version_id_to_copy = random.choice(
                                [v.version_id for v in versions])
                            log.info("version_id_to_copy: %s" %
                                     version_id_to_copy)
                            s3_obj = rgw_conn.Object(bucket.name,
                                                     s3_object_name)
                            log.info("current version_id: %s" %
                                     s3_obj.version_id)
                            copy_response = s3_obj.copy_from(
                                CopySource={
                                    "Bucket": bucket.name,
                                    "Key": s3_object_name,
                                    "VersionId": version_id_to_copy,
                                })
                            log.info("copy_response: %s" % copy_response)
                            if copy_response is None:
                                raise TestExecError(
                                    "copy object from version id failed")
                            # current_version_id = copy_response['VersionID']
                            log.info("current_version_id: %s" %
                                     s3_obj.version_id)
                            # delete the version_id_to_copy object
                            s3_obj.delete(VersionId=version_id_to_copy)
                            log.info(
                                "all versions for the object after the copy operation: %s\n"
                                % s3_object_name)
                            for version in versions:
                                log.info(
                                    "key_name: %s --> version_id: %s" %
                                    (version.object_key, version.version_id))
                            # log.info('downloading current s3object: %s' % s3_object_name)
                            # s3_obj.download_file(s3_object_name + ".download")
                        if config.test_ops["delete_object_versions"] is True:
                            log.info("deleting s3_obj keys and its versions")
                            s3_obj = s3lib.resource_op({
                                "obj":
                                rgw_conn,
                                "resource":
                                "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" %
                                     s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s" %
                                         version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj":
                                    s3_obj,
                                    "resource":
                                    "delete",
                                    "kwargs":
                                    dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(
                                        del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted ")
                                        reusable.delete_version_object(
                                            bucket,
                                            version.version_id,
                                            s3_object_path,
                                            rgw_conn,
                                            each_user,
                                        )
                                    else:
                                        raise TestExecError(
                                            "version  deletion failed")
                                else:
                                    raise TestExecError(
                                        "version deletion failed")
                            log.info("available versions for the object")
                            versions = bucket.object_versions.filter(
                                Prefix=s3_object_name)
                            for version in versions:
                                log.info(
                                    "key_name: %s --> version_id: %s" %
                                    (version.object_key, version.version_id))
                        if config.test_ops.get(
                                "delete_from_extra_user") is True:
                            log.info(
                                "trying to delete objects from extra user")
                            s3_obj = s3lib.resource_op({
                                "obj":
                                extra_user_conn,
                                "resource":
                                "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" %
                                     s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s" %
                                         version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj":
                                    s3_obj,
                                    "resource":
                                    "delete",
                                    "kwargs":
                                    dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not False:
                                    response = HttpResponseParser(
                                        del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted ")
                                        write_key_io_info.delete_version_info(
                                            each_user["access_key"],
                                            bucket.name,
                                            s3_object_path,
                                            version.version_id,
                                        )
                                        raise TestExecError(
                                            "version and deleted, this should not happen"
                                        )
                                    else:
                                        log.info(
                                            "version did not delete, expected behaviour"
                                        )
                                else:
                                    log.info(
                                        "version did not delete, expected behaviour"
                                    )
                        if config.local_file_delete is True:
                            log.info("deleting local file")
                            utils.exec_shell_cmd("sudo rm -rf %s" %
                                                 s3_object_path)
                if config.test_ops["suspend_version"] is True:
                    log.info("suspending versioning")
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None
                    })
                    response = HttpResponseParser(suspend_version_status)
                    if response.status_code == 200:
                        log.info("versioning suspended")
                        write_bucket_io_info.add_versioning_status(
                            each_user["access_key"],
                            bucket.name,
                            VERSIONING_STATUS["SUSPENDED"],
                        )
                    else:
                        raise TestExecError("version suspend failed")
                    # getting all objects in the bucket
                    log.info("getting all objects in the bucket")
                    objects = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "objects",
                        "args": None
                    })
                    log.info("objects :%s" % objects)
                    all_objects = s3lib.resource_op({
                        "obj": objects,
                        "resource": "all",
                        "args": None
                    })
                    log.info("all objects: %s" % all_objects)
                    log.info("all objects2 :%s " % bucket.objects.all())
                    for obj in all_objects:
                        log.info("object_name: %s" % obj.key)
                        versions = bucket.object_versions.filter(
                            Prefix=obj.key)
                        log.info("displaying all versions of the object")
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s" %
                                     (version.object_key, version.version_id))
                if config.test_ops.get("suspend_from_extra_user") is True:
                    log.info("suspending versioning from extra user")
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')

                    bucket_versioning = s3lib.resource_op({
                        "obj": extra_user_conn,
                        "resource": "BucketVersioning",
                        "args": [bucket.name],
                    })

                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None
                    })
                    if suspend_version_status is not False:
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info("versioning suspended")
                            write_bucket_io_info.add_versioning_status(
                                each_user["access_key"],
                                bucket.name,
                                VERSIONING_STATUS["SUSPENDED"],
                            )
                            raise TestExecError(
                                "versioning suspended by extra user, this should not happen")
                    else:
                        log.info(
                            "versioning not suspended, expected behaviour")
            if config.test_ops.get("upload_after_suspend") is True:
                log.info(
                    "trying to upload after suspending versioning on bucket")
                for oc, s3_object_size in list(config.mapped_sizes.items()):
                    # non versioning upload
                    s3_object_name = s3_object_names[
                        oc] + ".after_version_suspending"
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH,
                                                  s3_object_name)
                    non_version_data_info = manage_data.io_generator(
                        s3_object_path,
                        s3_object_size,
                        op="append",
                        **{"message": "\nhello for non version\n"})
                    if non_version_data_info is False:
                        raise TestExecError("data creation failed")
                    log.info("uploading s3 object: %s" % s3_object_path)
                    upload_info = dict(
                        {
                            "access_key": each_user["access_key"],
                            "versioning_status": "suspended",
                        }, **non_version_data_info)
                    s3_obj = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "Object",
                        "args": [s3_object_name],
                        "extra_info": upload_info,
                    })
                    object_uploaded_status = s3lib.resource_op({
                        "obj": s3_obj,
                        "resource": "upload_file",
                        "args": [non_version_data_info["name"]],
                        "extra_info": upload_info,
                    })

                    if object_uploaded_status is False:
                        raise TestExecError(
                            "Resource execution failed: object upload failed")
                    if object_uploaded_status is None:
                        log.info("object uploaded")
                    s3_obj = s3lib.resource_op({
                        "obj": rgw_conn,
                        "resource": "Object",
                        "args": [bucket.name, s3_object_name],
                    })
                    log.info("version_id: %s" % s3_obj.version_id)
                    if s3_obj.version_id is None:
                        log.info("Versions are not created after suspending")
                    else:
                        raise TestExecError(
                            "Versions are created even after suspending")
                    s3_object_download_path = os.path.join(
                        TEST_DATA_PATH, s3_object_name + ".download")
                    object_downloaded_status = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "download_file",
                        "args": [s3_object_name, s3_object_download_path],
                    })
                    if object_downloaded_status is False:
                        raise TestExecError(
                            "Resource execution failed: object download failed"
                        )
                    if object_downloaded_status is None:
                        log.info("object downloaded")
                    # checking md5 of the downloaded file against the uploaded data
                    s3_object_downloaded_md5 = utils.get_md5(
                        s3_object_download_path)
                    log.info("s3_object_downloaded_md5: %s" %
                             s3_object_downloaded_md5)
                    log.info("s3_object_uploaded_md5: %s" %
                             non_version_data_info["md5"])
                    if s3_object_downloaded_md5 != non_version_data_info["md5"]:
                        raise TestExecError(
                            "md5 mismatch between uploaded and downloaded object")
                    if config.local_file_delete is True:
                        utils.exec_shell_cmd("sudo rm -rf %s" % s3_object_path)
            if config.test_ops.get("delete_bucket") is True:
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
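    """Reshard a bucket dynamically or manually, upload (optionally versioned)
    objects, and verify the shard count reported by radosgw-admin bucket stats.
    """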

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    log.info("starting IO")
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info("sharding configuration will be added now.")
    if config.sharding_type == "dynamic":
        log.info("sharding type is dynamic")
        # for dynamic,
        # the number of shards  should be greater than   [ (no of objects)/(max objects per shard) ]
        # example: objects = 500 ; max object per shard = 10
        # then no of shards should be at least 50 or more
        time.sleep(15)
        log.info("making changes to ceph.conf")
        ceph_conf.set_to_ceph_conf(
            "global",
            ConfigOpts.rgw_max_objs_per_shard,
            str(config.max_objects_per_shard),
        )
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_dynamic_resharding,
                                   "True")
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info("num_shards_expected: %s" % num_shards_expected)
        log.info("trying to restart services ")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    config.bucket_count = 1
    objects_created_list = []
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"],
                                                    rand_no=1)
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    if config.test_ops.get("enable_version", False):
        log.info("enable bucket version")
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
    log.info("s3 objects to create: %s" % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
        if config.test_ops.get("enable_version", False):
            reusable.upload_version_object(
                config,
                user_info,
                rgw_conn,
                s3_object_name,
                config.obj_size,
                bucket,
                TEST_DATA_PATH,
            )
        else:
            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                   config, user_info)
        objects_created_list.append((s3_object_name, s3_object_path))

    if config.sharding_type == "manual":
        log.info("sharding type is manual")
        # for manual.
        # the number of shards will be the value set in the command.
        time.sleep(15)
        log.info("in manual sharding")
        cmd_exec = utils.exec_shell_cmd(
            "radosgw-admin bucket reshard --bucket=%s --num-shards=%s "
            "--yes-i-really-mean-it" % (bucket.name, config.shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    sleep_time = 600
    log.info(f"verification starts after waiting for {sleep_time} seconds")
    time.sleep(sleep_time)
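    # read the current shard count from 'radosgw-admin bucket stats'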
    op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket %s" %
                              bucket.name)
    json_doc = json.loads(op)
    num_shards_created = json_doc["num_shards"]
    log.info("no_of_shards_created: %s" % num_shards_created)
    if config.sharding_type == "manual":
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info("Expected number of shards created")
    if config.sharding_type == "dynamic":
        log.info("Verify if resharding list is empty")
        reshard_list_op = json.loads(
            utils.exec_shell_cmd("radosgw-admin reshard list"))
        if not reshard_list_op:
            log.info(
                "for dynamic resharding, the number of shards created should be "
                "greater than or equal to the expected number of shards")
            log.info("no_of_shards_expected: %s" % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info("Expected number of shards created")
            else:
                raise TestExecError("Expected number of shards not created")
        else:
            raise TestExecError("Expected number of shards not created")

    if config.test_ops.get("delete_bucket_object", False):
        if config.test_ops.get("enable_version", False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn,
                                               user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
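    """Tune rgw_max_objs_per_shard, rgw_dynamic_resharding and rgw_gc_obj_min_wait,
    upload (optionally versioned) objects, then optionally list and delete the
    objects, their versions and the bucket.
    """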

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    objects_created_list = []

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    # apply rgw_max_objs_per_shard, rgw_dynamic_resharding and rgw_gc_obj_min_wait in ceph.conf
    log.info('making changes to ceph.conf')
    log.info(f'rgw_max_objs_per_shard parameter set to {config.max_objects_per_shard}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard,
                               str(config.max_objects_per_shard))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding,
                               'True')
    log.info(f'rgw gc obj min wait configuration parameter set to {str(config.rgw_gc_obj_min_wait)}')
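    # rgw_gc_obj_min_wait: minimum time a deleted object waits before the RGW
    # garbage collector is allowed to process it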
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,
                               str(config.rgw_gc_obj_min_wait))
    sleep_time = 10
    log.info(f'Restarting RGW service and waiting for {sleep_time} seconds')
    srv_restarted = rgw_service.restart()
    time.sleep(sleep_time)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                log.info(f'creating bucket no. {bc}')
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('enable_version', False):
                            log.info('upload versioned objects')
                            reusable.upload_version_object(
                                config, each_user, rgw_conn, s3_object_name,
                                config.obj_size, bucket, TEST_DATA_PATH)
                        else:
                            log.info('upload type: normal')
                            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                        objects_created_list.append((s3_object_name, s3_object_path))
                # deleting the local file created after upload
                if config.local_file_delete is True:
                    log.info('deleting local file created after the upload')
                    utils.exec_shell_cmd('rm -rf %s' % s3_object_path)

                # listing the objects
                if config.test_ops.get('list_objects', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            reusable.list_versioned_objects(bucket, name, path, rgw_conn)
                    else:
                        reusable.list_objects(bucket)
                
                if config.test_ops.get('delete_bucket_object', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            print("name, path",name,path)
                            versions = bucket.object_versions.filter(Prefix=name)
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, name]})
                            log.info('deleting versions for s3 obj: %s' % name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        reusable.delete_version_object(
                                            bucket, version.version_id, path,
                                            rgw_conn, each_user)
                                    else:
                                        raise TestExecError("version deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                    else:
                        reusable.delete_objects(bucket)
                    log.info(f'deleting the bucket {bucket_name_to_create}')
                    reusable.delete_bucket(bucket)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    
    # remove the user
    reusable.remove_user(each_user)
def test_exec(config):
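    """Create buckets with flat or pseudo-directory object layouts and measure
    ordered/unordered bucket listing time via radosgw-admin and via boto,
    logged against rgw_bucket_index_max_aio.
    """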

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    if config.test_ops['object_structure'] == 'flat':
                        # uploading data
                        log.info('top level s3 objects to create: %s' %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info('s3 object name: %s' % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info('s3 object path: %s' % s3_object_path)
                            if config.test_ops.get(
                                    'upload_type') == 'multipart':
                                log.info('upload type: multipart')
                                reusable.upload_mutipart_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                            else:
                                log.info('upload type: normal')
                                reusable.upload_object(s3_object_name, bucket,
                                                       TEST_DATA_PATH, config,
                                                       each_user)
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    'deleting local file created after the upload'
                                )
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it;
                    # "Unable to list contents of large buckets" https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops['object_structure'] == 'pseudo':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info('s3 object name: %s' % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info('s3 object path: %s' % s3_object_path)
                                if config.test_ops.get(
                                        'upload_type') == 'multipart':
                                    log.info('upload type: multipart')
                                    reusable.upload_mutipart_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                else:
                                    log.info('upload type: normal')
                                    reusable.upload_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        'deleting local file created after the upload'
                                    )
                                    utils.exec_shell_cmd('rm -rf %s' %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops['create_object'] is False:
                    if config.test_ops[
                            'object_structure'] == 'pseudo-dir-only':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count}'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops['radoslist'] is True:
                    log.info(
                        'executing the command radosgw-admin bucket radoslist '
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # read the rgw_bucket_index_max_aio value from the running RGW daemon via its admin socket
                cmd = 'ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio'
                max_aio_output = utils.exec_shell_cmd(cmd)
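                # the configured value is assumed to be the second
                # whitespace-separated field of the matched line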
                max_aio = max_aio_output.split()[1]

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json['usage']['rgw.main'][
                    'num_objects']

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    'measure the execution time taken to list via radosgw-admin command'
                )
                if config.test_ops['radosgw_listing_ordered'] is True:
                    log.info('ordered listing via radosgw-admin command')
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'ordered')
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops['radosgw_listing_ordered'] is False:
                    log.info('unordered listing via radosgw-admin command')
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'unordered')
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info('measure the execution time taken to list via boto')
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f'with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins'
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        if config.test_ops.get('delete_bucket_object', False):
            if config.test_ops.get('enable_version', False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)