Example #1
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    _, version_name = utils.get_ceph_version()
    if "nautilus" in version_name:
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_max_worker,
                                   str(config.rgw_lc_max_worker))
    else:
        ceph_conf.set_to_ceph_conf(
            section=None,
            option=ConfigOpts.rgw_lc_max_worker,
            value=str(config.rgw_lc_max_worker),
        )
        ceph_conf.set_to_ceph_conf(section=None,
                                   option=ConfigOpts.rgw_lc_debug_interval,
                                   value="30")
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"],
                                                    rand_no=1)
    obj_list = []
    obj_tag = "suffix1=WMV1"
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    # collect the prefix from each lifecycle rule, whether it sits directly
    # under Filter or nested under Filter.And
    prefix = [
        rule["Filter"].get("Prefix") or rule["Filter"]["And"].get("Prefix")
        for rule in config.lifecycle_conf
    ]
    prefix = prefix if prefix else ["dummy1"]
    if config.test_ops["enable_versioning"] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                # rotate the prefix list so objects are spread round-robin
                # across every lifecycle rule's prefix
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops["version_count"] > 0:
                    for vc in range(config.test_ops["version_count"]):
                        log.info("version count for %s is %s" %
                                 (s3_object_name, str(vc)))
                        log.info("modifying data: %s" % s3_object_name)
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            user_info,
                            append_data=True,
                            append_msg="hello object for version: %s\n" %
                            str(vc),
                        )
                else:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    reusable.upload_object(s3_object_name, bucket,
                                           TEST_DATA_PATH, config, user_info)

        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops["delete_marker"] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                                   life_cycle_rule_new, config)
    if config.test_ops["enable_versioning"] is False:
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
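
For reference, the prefix extraction in this example assumes every lifecycle rule carries its prefix either directly under Filter or nested under Filter.And. A minimal sketch of a config.lifecycle_conf value with that shape (rule ids, prefixes, days, and the tag are illustrative, not taken from the suite's YAML configs):

lifecycle_conf = [
    {
        "ID": "LC_Rule_1",                    # hypothetical rule id
        "Filter": {"Prefix": "key1"},         # plain prefix filter
        "Status": "Enabled",
        "Expiration": {"Days": 1},
    },
    {
        "ID": "LC_Rule_2",
        "Filter": {
            "And": {                          # And-filter variant: prefix plus tag
                "Prefix": "key2",
                "Tags": [{"Key": "suffix1", "Value": "WMV1"}],
            }
        },
        "Status": "Enabled",
        "Expiration": {"Days": 1},
    },
]
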
Example #2
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    log.info('starting IO')
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info('sharding configuration will be added now.')
    if config.sharding_type == 'dynamic':
        log.info('sharding type is dynamic')
        # for dynamic sharding, the number of shards should be greater than
        # (number of objects) / (max objects per shard)
        # example: objects = 500, max objects per shard = 10
        # then the number of shards should be at least 50
        time.sleep(15)
        log.info('making changes to ceph.conf')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard,
                                   str(config.max_objects_per_shard))
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding,
                                   'True')
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info('num_shards_expected: %s' % num_shards_expected)
        log.info('trying to restart services ')
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    config.bucket_count = 1
    objects_created_list = []
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'],
                                                    rand_no=1)
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    if config.test_ops.get('enable_version', False):
        log.info('enable bucket version')
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
    log.info('s3 objects to create: %s' % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
        if config.test_ops.get('enable_version', False):
            reusable.upload_version_object(config, user_info, rgw_conn,
                                           s3_object_name, config.obj_size,
                                           bucket, TEST_DATA_PATH)
        else:
            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                   config, user_info)
        objects_created_list.append((s3_object_name, s3_object_path))

    if config.sharding_type == 'manual':
        log.info('sharding type is manual')
        # for manual sharding, the number of shards will be the value
        # set in the reshard command
        time.sleep(15)
        log.info('in manual sharding')
        cmd_exec = utils.exec_shell_cmd(
            'radosgw-admin bucket reshard --bucket=%s --num-shards=%s '
            '--yes-i-really-mean-it' % (bucket.name, config.shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    sleep_time = 600
    log.info(f'verification starts after waiting for {sleep_time} seconds')
    time.sleep(sleep_time)
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" %
                              bucket.name)
    json_doc = json.loads(op)
    bucket_id = json_doc['data']['bucket']['bucket_id']
    op2 = utils.exec_shell_cmd(
        "radosgw-admin metadata get bucket.instance:%s:%s" %
        (bucket.name, bucket_id))
    json_doc2 = json.loads(op2)
    num_shards_created = json_doc2['data']['bucket_info']['num_shards']
    log.info('no_of_shards_created: %s' % num_shards_created)
    if config.sharding_type == 'manual':
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info('Expected number of shards created')
    if config.sharding_type == 'dynamic':
        log.info('Verify if resharding list is empty')
        reshard_list_op = json.loads(
            utils.exec_shell_cmd("radosgw-admin reshard list"))
        if not reshard_list_op:
            log.info(
                'for dynamic sharding, the number of shards created should be '
                'greater than or equal to the number of shards expected'
            )
            log.info('no_of_shards_expected: %s' % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')
            else:
                raise TestExecError('Expected number of shards not created')
        else:
            raise TestExecError(
                'resharding list is not empty; dynamic resharding did not complete'
            )

    if config.test_ops.get('delete_bucket_object', False):
        if config.test_ops.get('enable_version', False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn,
                                               user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
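
The dynamic-resharding expectation above is computed with plain division and later compared with >=. As a worked check of that arithmetic, a short sketch that rounds up so a partial shard still counts (math.ceil is my addition; the test itself keeps the float):

import math

objects_count = 500            # illustrative values from the comment above
max_objects_per_shard = 10

# dynamic resharding should leave at least ceil(objects / max objects per shard) shards
num_shards_expected = math.ceil(objects_count / max_objects_per_shard)
assert num_shards_expected == 50
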
Example #3
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # check the default data log backing
    default_data_log = reusable.get_default_datalog_type()
    log.info(f"{default_data_log} is the default data log backing")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        objects_created_list = []

        # change the default datalog backing to FIFO
        if config.test_ops.get("change_datalog_backing", False):
            logtype = config.test_ops["change_datalog_backing"]
            log.info(f"change default datalog backing to {logtype}")
            cmd = f"radosgw-admin datalog type --log-type={logtype}"
            change_datalog_type = utils.exec_shell_cmd(cmd)
            if change_datalog_type is False:
                raise TestExecError(f"Failed to change the datalog type to {logtype}")
            log.info(
                "restarting the rgw daemons and sleeping 30 seconds for them to come up"
            )
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(
                        bucket, rgw_conn, each_user, write_bucket_io_info
                    )
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info(
                        "top level s3 objects to create: %s" % config.objects_count
                    )
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        objects_created_list.append((s3_object_name, s3_object_path))
                        # deleting the local file created after upload
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)

        # delete  object and bucket
        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(
                        bucket, name, path, rgw_conn, each_user
                    )
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check for any ERRORs in datalog list. ref- https://bugzilla.redhat.com/show_bug.cgi?id=1917687
    error_in_data_log_list = reusable.check_datalog_list()
    if error_in_data_log_list:
        raise TestExecError("Error in datalog list")

    # check for data log markers. ref: https://bugzilla.redhat.com/show_bug.cgi?id=1831798#c22
    data_log_marker = reusable.check_datalog_marker()
    log.info(f"The data_log_marker is: {data_log_marker}")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #4
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()

        # authenticate sns client.
        rgw_sns_conn = auth.do_auth_sns_client()

        # authenticate with s3 client
        rgw_s3_client = auth.do_auth_using_client()

        # get ceph version
        ceph_version_id, ceph_version_name = utils.get_ceph_version()

        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)

                # create topic with endpoint
                if config.test_ops["create_topic"] is True:
                    endpoint = config.test_ops.get("endpoint")
                    ack_type = config.test_ops.get("ack_type")
                    topic_id = str(uuid.uuid4().hex[:16])
                    persistent = False
                    topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id
                    log.info(
                        f"creating a topic with {endpoint} endpoint with ack type {ack_type}"
                    )
                    if config.test_ops.get("persistent_flag", False):
                        log.info("topic with persistent flag enabled")
                        persistent = config.test_ops.get("persistent_flag")
                    topic = notification.create_topic(rgw_sns_conn, endpoint,
                                                      ack_type, topic_name,
                                                      persistent)

                # get topic attributes
                if config.test_ops.get("get_topic_info", False):
                    log.info("get topic attributes")
                    get_topic_info = notification.get_topic(
                        rgw_sns_conn, topic, ceph_version_name)

                # put bucket notification with topic configured for event
                if config.test_ops["put_get_bucket_notification"] is True:
                    event = config.test_ops.get("event_type")
                    notification_name = "notification-" + str(event)
                    notification.put_bucket_notification(
                        rgw_s3_client,
                        bucket_name_to_create,
                        notification_name,
                        topic,
                        event,
                    )

                    # get bucket notification
                    log.info(
                        f"get bucket notification for bucket : {bucket_name_to_create}"
                    )
                    notification.get_bucket_notification(
                        rgw_s3_client, bucket_name_to_create)

                # create objects
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        # track uploads so the versioned delete path below has
                        # objects to iterate over
                        objects_created_list.append((s3_object_name, s3_object_path))
                # copy objects
                if config.test_ops.get("copy_object", False):
                    log.info("copy object")
                    status = rgw_s3_client.copy_object(
                        Bucket=bucket_name_to_create,
                        Key="copy_of_object" + s3_object_name,
                        CopySource={
                            "Bucket": bucket_name_to_create,
                            "Key": s3_object_name,
                        },
                    )
                    if status is None:
                        raise TestExecError("copy object failed")

            # delete objects
            if config.test_ops.get("delete_bucket_object", False):
                if config.test_ops.get("enable_version", False):
                    for name, path in objects_created_list:
                        reusable.delete_version_object(bucket, name, path,
                                                       rgw_conn, each_user)
                else:
                    reusable.delete_objects(bucket)

            # start kafka broker and consumer
            event_record_path = "/home/cephuser/event_record"
            start_consumer = notification.start_kafka_broker_consumer(
                topic_name, event_record_path)
            if start_consumer is False:
                raise TestExecError("Kafka consumer not running")

            # verify all the attributes of the event record. if event not received abort testcase
            log.info("verify event record attributes")
            verify = notification.verify_event_record(event,
                                                      bucket_name_to_create,
                                                      event_record_path,
                                                      ceph_version_name)
            if verify is False:
                raise EventRecordDataError(
                    "Event record is empty! notification is not seen")

        # delete topic logs on kafka broker
        notification.del_topic_from_kafka_broker(topic_name)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
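
notification.create_topic and notification.put_bucket_notification are suite helpers; a minimal sketch of the plain boto3 calls they presumably wrap, following the Ceph bucket-notification API (the endpoint URLs, topic name, and bucket name are illustrative assumptions):

import boto3

# SNS-compatible client pointed at the RGW endpoint (hypothetical address)
sns = boto3.client("sns", endpoint_url="http://rgw-node:80")
topic = sns.create_topic(
    Name="cephci-kafka-broker-ack-type-topic",      # hypothetical topic name
    Attributes={
        "push-endpoint": "kafka://localhost:9092",  # hypothetical kafka broker
        "kafka-ack-level": "broker",
        "persistent": "false",
    },
)

s3 = boto3.client("s3", endpoint_url="http://rgw-node:80")
s3.put_bucket_notification_configuration(
    Bucket="my-bucket",                             # hypothetical bucket name
    NotificationConfiguration={
        "TopicConfigurations": [
            {
                "Id": "notification-s3:ObjectCreated:*",
                "TopicArn": topic["TopicArn"],
                "Events": ["s3:ObjectCreated:*"],
            }
        ]
    },
)
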
Example #5
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    log.info("starting IO")
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info("sharding configuration will be added now.")
    if config.sharding_type == "dynamic":
        log.info("sharding type is dynamic")
        # for dynamic sharding, the number of shards should be greater than
        # (number of objects) / (max objects per shard)
        # example: objects = 500, max objects per shard = 10
        # then the number of shards should be at least 50
        time.sleep(15)
        log.info("making changes to ceph.conf")
        ceph_conf.set_to_ceph_conf(
            "global",
            ConfigOpts.rgw_max_objs_per_shard,
            str(config.max_objects_per_shard),
        )
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_dynamic_resharding,
                                   "True")
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info("num_shards_expected: %s" % num_shards_expected)
        log.info("trying to restart services ")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    config.bucket_count = 1
    objects_created_list = []
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"],
                                                    rand_no=1)
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    if config.test_ops.get("enable_version", False):
        log.info("enable bucket version")
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
    log.info("s3 objects to create: %s" % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
        if config.test_ops.get("enable_version", False):
            reusable.upload_version_object(
                config,
                user_info,
                rgw_conn,
                s3_object_name,
                config.obj_size,
                bucket,
                TEST_DATA_PATH,
            )
        else:
            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                   config, user_info)
        objects_created_list.append((s3_object_name, s3_object_path))

    if config.sharding_type == "manual":
        log.info("sharding type is manual")
        # for manual sharding, the number of shards will be the value
        # set in the reshard command
        time.sleep(15)
        log.info("in manual sharding")
        cmd_exec = utils.exec_shell_cmd(
            "radosgw-admin bucket reshard --bucket=%s --num-shards=%s "
            "--yes-i-really-mean-it" % (bucket.name, config.shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    sleep_time = 600
    log.info(f"verification starts after waiting for {sleep_time} seconds")
    time.sleep(sleep_time)
    op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket %s" %
                              bucket.name)
    json_doc = json.loads(op)
    num_shards_created = json_doc["num_shards"]
    log.info("no_of_shards_created: %s" % num_shards_created)
    if config.sharding_type == "manual":
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info("Expected number of shards created")
    if config.sharding_type == "dynamic":
        log.info("Verify if resharding list is empty")
        reshard_list_op = json.loads(
            utils.exec_shell_cmd("radosgw-admin reshard list"))
        if not reshard_list_op:
            log.info(
                "for dynamic sharding, the number of shards created should be "
                "greater than or equal to the number of shards expected"
            )
            log.info("no_of_shards_expected: %s" % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info("Expected number of shards created")
            else:
                raise TestExecError("Expected number of shards not created")
        else:
            raise TestExecError(
                "resharding list is not empty; dynamic resharding did not complete"
            )

    if config.test_ops.get("delete_bucket_object", False):
        if config.test_ops.get("enable_version", False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn,
                                               user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #6
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops["create_object"] is True:
                    if config.test_ops["object_structure"] == "flat":
                        # uploading data
                        log.info("top level s3 objects to create: %s" %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info("s3 object name: %s" % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info("s3 object path: %s" % s3_object_path)
                            if config.test_ops.get(
                                    "upload_type") == "multipart":
                                log.info("upload type: multipart")
                                reusable.upload_mutipart_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            else:
                                log.info("upload type: normal")
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    "deleting local file created after the upload"
                                )
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops["object_structure"] == "pseudo":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info("s3 object name: %s" % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info("s3 object path: %s" % s3_object_path)
                                if config.test_ops.get(
                                        "upload_type") == "multipart":
                                    log.info("upload type: multipart")
                                    reusable.upload_mutipart_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                else:
                                    log.info("upload type: normal")
                                    reusable.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        "deleting local file created after the upload"
                                    )
                                    utils.exec_shell_cmd("rm -rf %s" %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops["create_object"] is False:
                    if config.test_ops[
                            "object_structure"] == "pseudo-dir-only":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count}"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops["radoslist"] is True:
                    log.info(
                        "executing the command radosgw-admin bucket radoslist "
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter - rgw_bucket_index_max_aio
                ceph_version_id, ceph_version_name = utils.get_ceph_version()
                if ceph_version_name in ["luminous", "nautilus"]:
                    cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.split()[1]
                else:
                    cmd = "ceph config get mon rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.rstrip("\n")

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][
                    "num_objects"]

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    "measure the execution time taken to list via radosgw-admin command"
                )
                if config.test_ops["radosgw_listing_ordered"] is True:
                    log.info("ordered listing via radosgw-admin command")
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "ordered")
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops["radosgw_listing_ordered"] is False:
                    log.info("unordered listing via radosgw-admin command")
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "unordered")
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info("measure the execution time taken to list via boto")
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins"
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265
        if config.radoslist_all is True:
            log.info(
                "Executing the command radosgw-admin bucket radoslist on all buckets"
            )
            cmd = "radosgw-admin bucket radoslist | grep ERROR"
            radoslist_all_error = utils.exec_shell_cmd(cmd)
            if radoslist_all_error:
                raise TestExecError("ERROR in radoslist command")

        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)
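
reusable.time_to_list_via_boto is another suite helper; a minimal sketch of how such a timing could be taken with a plain boto3 paginator (the client handle, the perf_counter-based measurement, and the function name are assumptions, not the helper's actual implementation):

import time

def time_to_list(s3_client, bucket_name):
    """Return (seconds taken, object count) for a full ordered listing."""
    paginator = s3_client.get_paginator("list_objects_v2")
    start = time.perf_counter()
    count = 0
    for page in paginator.paginate(Bucket=bucket_name):
        count += len(page.get("Contents", []))
    return time.perf_counter() - start, count
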
Example #7
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info('making changes to ceph.conf')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_debug_interval, str(config.rgw_lc_debug_interval))
    ceph_version = utils.exec_shell_cmd("ceph version")
    if 'nautilus' in ceph_version.split():
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_max_worker, str(config.rgw_lc_max_worker))
    log.info('trying to restart services')
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')

    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1)
    obj_list = []
    obj_tag = 'suffix1=WMV1'
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    # collect the prefix from each lifecycle rule, whether it sits directly
    # under Filter or nested under Filter.And
    prefix = [rule['Filter'].get('Prefix') or
              rule['Filter']['And'].get('Prefix')
              for rule in config.lifecycle_conf]
    prefix = prefix if prefix else ['dummy1']
    if config.test_ops['enable_versioning'] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info)
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops['version_count'] > 0:
                    for vc in range(config.test_ops['version_count']):
                        log.info('version count for %s is %s' % (s3_object_name, str(vc)))
                        log.info('modifying data: %s' % s3_object_name)
                        reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info,
                                               append_data=True,
                                               append_msg='hello object for version: %s\n' % str(vc))
                else:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)

        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops['delete_marker'] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule_new, config)
    if config.test_ops['enable_versioning'] is False:
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket, TEST_DATA_PATH, config, user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2, life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
Example #8
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()

    version_count = 3
    # create user
    s3_user = s3lib.create_users(1)[0]
    # authenticate
    auth = Auth(s3_user, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    b1_name = utils.gen_bucket_name_from_userid(s3_user["user_id"], rand_no=1)
    b1_k1_name = b1_name + ".key.1"  # key1
    b1_k2_name = b1_name + ".key.2"  # key2
    b2_name = utils.gen_bucket_name_from_userid(s3_user["user_id"], rand_no=2)
    b2_k1_name = b2_name + ".key.1"  # key1
    b2_k2_name = b2_name + ".key.2"  # key2
    b1 = reusable.create_bucket(b1_name, rgw_conn, s3_user)
    b2 = reusable.create_bucket(b2_name, rgw_conn, s3_user)
    # enable versioning on b1
    reusable.enable_versioning(b1, rgw_conn, s3_user, write_bucket_io_info)
    # upload object to version enabled bucket b1
    obj_sizes = list(config.mapped_sizes.values())
    config.obj_size = obj_sizes[0]
    for vc in range(version_count):
        reusable.upload_object(
            b1_k1_name,
            b1,
            TEST_DATA_PATH,
            config,
            s3_user,
            append_data=True,
            append_msg="hello vc count: %s" % str(vc),
        )
    # upload object to non version bucket b2
    config.obj_size = obj_sizes[1]
    reusable.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config, s3_user)
    # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created
    # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present
    b1_k2 = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "Object",
        "args": [b1.name, b1_k2_name]
    })
    b2_k2 = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "Object",
        "args": [b2.name, b2_k2_name]
    })
    log.info(
        "copy from b2_k1 key to b1_k2 key to bucket 1 -> version enabled bucket"
    )
    copy_response = b1_k2.copy_from(CopySource={
        "Bucket": b2.name,
        "Key": b2_k1_name,
    })
    log.info("copy_response: %s" % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info("checking if the copied object has a version id created")
    b1_k2_version_id = b1_k2.version_id
    log.info("version id: %s" % b1_k2_version_id)
    if b1_k2_version_id is None:
        raise TestExecError(
            "Version ID not created for the object copied into the version-enabled bucket"
        )
    else:
        log.info(
            "Version ID created for the object copied into the version-enabled bucket"
        )
    all_objects_in_b1 = b1.objects.all()
    log.info("all objects in bucket 1")
    for obj in all_objects_in_b1:
        log.info("object_name: %s" % obj.key)
        versions = b1.object_versions.filter(Prefix=obj.key)
        log.info("displaying all versions of the object")
        for version in versions:
            log.info("key_name: %s --> version_id: %s" %
                     (version.object_key, version.version_id))
    log.info("-------------------------------------------")
    log.info("copy from b1_k1 key to b2_k2 to bucket 2 -> non version bucket")
    copy_response = b2_k2.copy_from(CopySource={
        "Bucket": b1.name,
        "Key": b1_k1_name,
    })
    log.info("copy_response: %s" % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info("checking if the copied object has a version id created")
    b2_k2_version_id = b2_k2.version_id
    log.info("version id: %s" % b2_k2_version_id)
    if b2_k2_version_id is None:
        log.info(
            "Version ID not created for the object copied into the non-versioned bucket"
        )
    else:
        raise TestExecError(
            "Version ID created for the object copied into the non-versioned bucket"
        )
    all_objects_in_b2 = b2.objects.all()
    log.info("all objects in bucket 2")
    for obj in all_objects_in_b2:
        log.info("object_name: %s" % obj.key)
        versions = b2.object_versions.filter(Prefix=obj.key)
        log.info("displaying all versions of the object")
        for version in versions:
            log.info("key_name: %s --> version_id: %s" %
                     (version.object_key, version.version_id))

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
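
s3lib.resource_op is a thin suite wrapper for building boto3 resource objects; assuming it simply resolves the named resource with the given args, the object handles and the copy under test look like this in plain boto3 (endpoint and names are illustrative):

import boto3

s3 = boto3.resource("s3", endpoint_url="http://rgw-node:80")  # hypothetical endpoint

b1_k2 = s3.Object("bucky-1", "bucky-1.key.2")
copy_response = b1_k2.copy_from(CopySource={"Bucket": "bucky-2", "Key": "bucky-2.key.1"})
# on a version-enabled destination bucket the copy mints a new version id;
# on a plain bucket version_id stays None
print(b1_k2.version_id)
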
Example #9
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()

    version_count = 3
    # create user
    s3_user = s3lib.create_users(1)[0]
    # authenticate
    auth = Auth(s3_user, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    b1_name = 'bucky.1e'  # bucket 1
    b1_k1_name = b1_name + ".key.1"  # key1
    b1_k2_name = b1_name + ".key.2"  # key2
    b2_name = 'bucky.2e'  # bucket 2
    b2_k1_name = b2_name + ".key.1"  # key1
    b2_k2_name = b2_name + ".key.2"  # key2
    b1 = reusable.create_bucket(b1_name, rgw_conn, s3_user)
    b2 = reusable.create_bucket(b2_name, rgw_conn, s3_user)
    # enable versioning on b1
    reusable.enable_versioning(b1, rgw_conn, s3_user, write_bucket_io_info)
    # upload object to version enabled bucket b1
    obj_sizes = list(config.mapped_sizes.values())
    config.obj_size = obj_sizes[0]
    for vc in range(version_count):
        reusable.upload_object(b1_k1_name,
                               b1,
                               TEST_DATA_PATH,
                               config,
                               s3_user,
                               append_data=True,
                               append_msg='hello vc count: %s' % str(vc))
    # upload object to non version bucket b2
    config.obj_size = obj_sizes[1]
    reusable.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config, s3_user)
    # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created
    # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present
    b1_k2 = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'Object',
        'args': [b1.name, b1_k2_name]
    })
    b2_k2 = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'Object',
        'args': [b2.name, b2_k2_name]
    })
    log.info(
        'copy from b2_k1 key to b1_k2 key to bucket 1 -> version enabled bucket'
    )
    copy_response = b1_k2.copy_from(CopySource={
        'Bucket': b2.name,
        'Key': b2_k1_name,
    })
    log.info('copy_response: %s' % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info('checking if the copied object has a version id created')
    b1_k2_version_id = b1_k2.version_id
    log.info('version id: %s' % b1_k2_version_id)
    if b1_k2_version_id is None:
        raise TestExecError(
            'Version ID not created for the object copied into the version-enabled bucket'
        )
    else:
        log.info(
            'Version ID created for the object copied into the version-enabled bucket'
        )
    all_objects_in_b1 = b1.objects.all()
    log.info('all objects in bucket 1')
    for obj in all_objects_in_b1:
        log.info('object_name: %s' % obj.key)
        versions = b1.object_versions.filter(Prefix=obj.key)
        log.info('displaying all versions of the object')
        for version in versions:
            log.info('key_name: %s --> version_id: %s' %
                     (version.object_key, version.version_id))
    log.info('-------------------------------------------')
    log.info('copy from b1_k1 key to b2_k2 to bucket 2 -> non version bucket')
    copy_response = b2_k2.copy_from(CopySource={
        'Bucket': b1.name,
        'Key': b1_k1_name,
    })
    log.info('copy_response: %s' % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info('checking if the copied object has a version id created')
    b2_k2_version_id = b2_k2.version_id
    log.info('version id: %s' % b2_k2_version_id)
    if b2_k2_version_id is None:
        log.info(
            'Version ID not created for the object copied into the non-versioned bucket'
        )
    else:
        raise TestExecError(
            'Version ID created for the object copied into the non-versioned bucket'
        )
    all_objects_in_b2 = b2.objects.all()
    log.info('all objects in bucket 2')
    for obj in all_objects_in_b2:
        log.info('object_name: %s' % obj.key)
        versions = b2.object_versions.filter(Prefix=obj.key)
        log.info('displaying all versions of the object')
        for version in versions:
            log.info('key_name: %s --> version_id: %s' %
                     (version.object_key, version.version_id))
Example #10
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    objects_created_list = []

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false")
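        # rgw_crypt_require_ssl=false lets SSE requests be sent over plain HTTP in this test setup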
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    # making changes to max_objects_per_shard and rgw_gc_obj_min_wait to ceph.conf
    log.info('making changes to ceph.conf')
    log.info(f'rgw_max_objs_per_shard parameter set to {config.max_objects_per_shard}')
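    # with rgw_dynamic_resharding enabled, RGW reshards a bucket automatically once a shard exceeds rgw_max_objs_per_shard objects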
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, 'True')
    log.info(f'rgw gc obj min wait configuration parameter set to {config.rgw_gc_obj_min_wait}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait, str(config.rgw_gc_obj_min_wait))
    sleep_time = 10
    log.info(f'Restarting RGW service and waiting for {sleep_time} seconds')
    srv_restarted = rgw_service.restart()
    time.sleep(sleep_time)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
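        # 'signature_version': 's3v4' selects AWS Signature Version 4 signing for the boto connection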
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                log.info(f'creating bucket no. {bc}')
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('enable_version', False):
                            log.info('upload versioned objects')
                            reusable.upload_version_object(config, each_user, rgw_conn, s3_object_name,
                                                           config.obj_size, bucket, TEST_DATA_PATH)
                        else:
                            log.info('upload type: normal')
                            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                        objects_created_list.append((s3_object_name, s3_object_path))
                #deleting the local file created after upload
                if config.local_file_delete is True:
                    log.info('deleting local file created after the upload')
                    utils.exec_shell_cmd('rm -rf %s' % s3_object_path)

                # listing the objects         
                if config.test_ops.get('list_objects', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            reusable.list_versioned_objects(bucket, name, path, rgw_conn)
                    else:
                        reusable.list_objects(bucket)
                
                if config.test_ops.get('delete_bucket_object', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            print("name, path",name,path)
                            versions = bucket.object_versions.filter(Prefix=name)
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, name]})
                            log.info('deleting versions for s3 obj: %s' % name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
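                                # a 204 (No Content) status confirms the DeleteObject call removed this object version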
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted')
                                        reusable.delete_version_object(bucket, version.version_id, path, rgw_conn, each_user)
                                    else:
                                        raise TestExecError("version deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                    else:
                        reusable.delete_objects(bucket)
                    log.info(f'deleting the bucket {bucket_name_to_create}')
                    reusable.delete_bucket(bucket)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    
    #remove the user
    reusable.remove_user(each_user)
Example #11
0
def test_exec(config):
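    """
    Summary (inferred from the test_ops handled below): create buckets with a
    flat or pseudo-directory object layout, then time bucket listing via
    radosgw-admin (ordered/unordered) and via boto, logging the configured
    rgw_bucket_index_max_aio alongside each measurement.
    """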

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    if config.test_ops['object_structure'] == 'flat':
                        # uploading data
                        log.info('top level s3 objects to create: %s' %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info('s3 object name: %s' % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info('s3 object path: %s' % s3_object_path)
                            if config.test_ops.get(
                                    'upload_type') == 'multipart':
                                log.info('upload type: multipart')
                                reusable.upload_mutipart_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                            else:
                                log.info('upload type: normal')
                                reusable.upload_object(s3_object_name, bucket,
                                                       TEST_DATA_PATH, config,
                                                       each_user)
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            #deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    'deleting local file created after the upload'
                                )
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it;
                    # Unable to list contents of large buckets: https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops['object_structure'] == 'pseudo':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info('s3 object name: %s' % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info('s3 object path: %s' % s3_object_path)
                                if config.test_ops.get(
                                        'upload_type') == 'multipart':
                                    log.info('upload type: multipart')
                                    reusable.upload_mutipart_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                else:
                                    log.info('upload type: normal')
                                    reusable.upload_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                #deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        'deleting local file created after the upload'
                                    )
                                    utils.exec_shell_cmd('rm -rf %s' %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops['create_object'] is False:
                    if config.test_ops[
                            'object_structure'] == 'pseudo-dir-only':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count}'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops['radoslist'] is True:
                    log.info(
                        'executing the command radosgw-admin bucket radoslist '
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter
                cmd = 'ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio'
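                # the grep output is expected to look like '"rgw_bucket_index_max_aio": "8",', so the second whitespace-separated token is the configured value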
                max_aio_output = utils.exec_shell_cmd(cmd)
                max_aio = max_aio_output.split()[1]

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json['usage']['rgw.main'][
                    'num_objects']

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    'measure the execution time taken to list via radosgw-admin command'
                )
                if config.test_ops['radosgw_listing_ordered'] is True:
                    log.info('ordered listing via radosgw-admin command')
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'ordered')
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops['radosgw_listing_ordered'] is False:
                    log.info('unordered listing via radosgw-admin command')
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'unordered')
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info('measure the execution time taken to list via boto')
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f'with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins'
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        if config.test_ops.get('delete_bucket_object', False):
            if config.test_ops.get('enable_version', False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)