def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()

        # authenticate sns client.
        rgw_sns_conn = auth.do_auth_sns_client()

        # authenticate with s3 client
        rgw_s3_client = auth.do_auth_using_client()

        # get ceph version
        ceph_version_id, ceph_version_name = utils.get_ceph_version()

        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)

                # create topic with endpoint
                if config.test_ops["create_topic"] is True:
                    endpoint = config.test_ops.get("endpoint")
                    ack_type = config.test_ops.get("ack_type")
                    topic_id = str(uuid.uuid4().hex[:16])
                    persistent = False
                    topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id
                    log.info(
                        f"creating a topic with {endpoint} endpoint with ack type {ack_type}"
                    )
                    if config.test_ops.get("persistent_flag", False):
                        log.info("topic with peristent flag enabled")
                        persistent = config.test_ops.get("persistent_flag")
                    topic = notification.create_topic(rgw_sns_conn, endpoint,
                                                      ack_type, topic_name,
                                                      persistent)

                # get topic attributes
                if config.test_ops.get("get_topic_info", False):
                    log.info("get topic attributes")
                    get_topic_info = notification.get_topic(
                        rgw_sns_conn, topic, ceph_version_name)

                # put bucket notification with topic configured for event
                if config.test_ops["put_get_bucket_notification"] is True:
                    event = config.test_ops.get("event_type")
                    notification_name = "notification-" + str(event)
                    notification.put_bucket_notification(
                        rgw_s3_client,
                        bucket_name_to_create,
                        notification_name,
                        topic,
                        event,
                    )

                    # get bucket notification
                    log.info(
                        f"get bucket notification for bucket : {bucket_name_to_create}"
                    )
                    notification.get_bucket_notification(
                        rgw_s3_client, bucket_name_to_create)

                # create objects
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                # copy objects
                if config.test_ops.get("copy_object", False):
                    log.info("copy object")
                    status = rgw_s3_client.copy_object(
                        Bucket=bucket_name_to_create,
                        Key="copy_of_object" + s3_object_name,
                        CopySource={
                            "Bucket": bucket_name_to_create,
                            "Key": s3_object_name,
                        },
                    )
                    if status is None:
                        raise TestExecError("copy object failed")

            # delete objects
            if config.test_ops.get("delete_bucket_object", False):
                if config.test_ops.get("enable_version", False):
                    for name, path in objects_created_list:
                        reusable.delete_version_object(bucket, name, path,
                                                       rgw_conn, each_user)
                else:
                    reusable.delete_objects(bucket)

            # start kafka broker and consumer
            event_record_path = "/home/cephuser/event_record"
            start_consumer = notification.start_kafka_broker_consumer(
                topic_name, event_record_path)
            if start_consumer is False:
                raise TestExecError("Kafka consumer not running")

            # verify all the attributes of the event record; abort the testcase if the event is not received
            log.info("verify event record attributes")
            verify = notification.verify_event_record(event,
                                                      bucket_name_to_create,
                                                      event_record_path,
                                                      ceph_version_name)
            if verify is False:
                raise EventRecordDataError(
                    "Event record is empty! notification is not seen")

        # delete topic logs on kafka broker
        notification.del_topic_from_kafka_broker(topic_name)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])

    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)

    buckets_created = []

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc
            )
            log.info("creating bucket with name: %s" % bucket_name)
            bucket = reusable.create_bucket(
                bucket_name, s3_client_rgw, assumed_role_user_info
            )
            buckets_created.append(bucket)

        if config.test_ops["create_object"] is True:
            for bucket in buckets_created:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    if config.test_ops["server_side_copy"] is True:
        bucket1, bucket2 = buckets_created

        # copy object1 from bucket1 to bucket2 with the same name as in bucket1
        log.info("copying first object from bucket1 to bucket2")
        all_keys_in_buck1 = []
        for obj in bucket1.objects.all():
            all_keys_in_buck1.append(obj.key)
        copy_source = {"Bucket": bucket1.name, "Key": all_keys_in_buck1[0]}
        copy_object_name = all_keys_in_buck1[0] + "_copied_obj"
        log.info(f"copy object name: {copy_object_name}")
        bucket2.copy(copy_source, copy_object_name)

        # list the objects in bucket2
        log.info("listing all objects im bucket2 after copy")
        all_bucket2_objs = []
        for obj in bucket2.objects.all():
            log.info(obj.key)
            all_bucket2_objs.append(obj.key)

        # check for object existence in bucket2
        if copy_object_name in all_bucket2_objs:
            log.info("server side copy successful")
        else:
            raise TestExecError("server side copy operation was not successful")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops["create_object"] is True:
                    if config.test_ops["object_structure"] == "flat":
                        # uploading data
                        log.info("top level s3 objects to create: %s" %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info("s3 object name: %s" % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info("s3 object path: %s" % s3_object_path)
                            if config.test_ops.get(
                                    "upload_type") == "multipart":
                                log.info("upload type: multipart")
                                reusable.upload_mutipart_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            else:
                                log.info("upload type: normal")
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    "deleting local file created after the upload"
                                )
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it; ref: "Unable to list contents of large buckets" https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops["object_structure"] == "pseudo":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info("s3 object name: %s" % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info("s3 object path: %s" % s3_object_path)
                                if config.test_ops.get(
                                        "upload_type") == "multipart":
                                    log.info("upload type: multipart")
                                    reusable.upload_mutipart_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                else:
                                    log.info("upload type: normal")
                                    reusable.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        "deleting local file created after the upload"
                                    )
                                    utils.exec_shell_cmd("rm -rf %s" %
                                                         s3_object_path)

                # listing a bucket with only pseudo directories; ref: "Bug allows ordered bucket listing to get stuck -- 4.1" https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops["create_object"] is False:
                    if config.test_ops[
                            "object_structure"] == "pseudo-dir-only":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count}"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops["radoslist"] is True:
                    log.info(
                        "executing the command radosgw-admin bucket radoslist "
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter - rgw_bucket_index_max_aio
                ceph_version_id, ceph_version_name = utils.get_ceph_version()
                if ceph_version_name in ["luminous", "nautilus"]:
                    cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.split()[1]
                else:
                    cmd = "ceph config get mon rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.rstrip("\n")

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][
                    "num_objects"]

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    "measure the execution time taken to list via radosgw-admin command"
                )
                if config.test_ops["radosgw_listing_ordered"] is True:
                    log.info("ordered listing via radosgw-admin command")
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "ordered")
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops["radosgw_listing_ordered"] is False:
                    log.info("unordered listing via radosgw-admin command")
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "unordered")
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info("measure the execution time taken to list via boto")
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins"
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265
        if config.radoslist_all is True:
            log.info(
                "Executing the command radosgw-admin bucket radoslist on all buckets"
            )
            cmd = "radosgw-admin bucket radoslist | grep ERROR"
            radoslist_all_error = utils.exec_shell_cmd(cmd)
            if radoslist_all_error:
                raise TestExecError("ERROR in radoslist command")

        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)
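
# A hedged sketch of what the listing timers used above are assumed to wrap:
# time.perf_counter around a full radosgw-admin or boto listing. utils is the
# same shell helper the tests import; the sketch_* names are hypothetical.
import time


def sketch_time_to_list_via_radosgw(bucket_name):
    start = time.perf_counter()
    out = utils.exec_shell_cmd(f"radosgw-admin bucket list --bucket {bucket_name}")
    return -1 if out is False else time.perf_counter() - start


def sketch_time_to_list_via_boto(bucket_name, rgw_conn):
    bucket = rgw_conn.Bucket(bucket_name)
    start = time.perf_counter()
    sum(1 for _ in bucket.objects.all())  # paginates through every key
    return time.perf_counter() - start
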
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # check the default data log backing
    default_data_log = reusable.get_default_datalog_type()
    log.info(f"{default_data_log} is the default data log backing")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        objects_created_list = []

        # change the default datalog backing to FIFO
        if config.test_ops.get("change_datalog_backing", False):
            logtype = config.test_ops["change_datalog_backing"]
            log.info(f"change default datalog backing to {logtype}")
            cmd = f"radosgw-admin datalog type --log-type={logtype}"
            change_datalog_type = utils.exec_shell_cmd(cmd)
            if change_datalog_type is False:
                raise TestExecError("Failed to change the datalog type to fifo")
            log.info(
                "restart the rgw daemons and sleep of 30secs for rgw daemon to be up "
            )
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(
                        bucket, rgw_conn, each_user, write_bucket_io_info
                    )
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info(
                        "top level s3 objects to create: %s" % config.objects_count
                    )
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        objects_created_list.append((s3_object_name, s3_object_path))
                        # deleting the local file created after upload
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)

        # delete object and bucket
        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(
                        bucket, name, path, rgw_conn, each_user
                    )
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check for any ERRORs in datalog list. ref- https://bugzilla.redhat.com/show_bug.cgi?id=1917687
    error_in_data_log_list = reusable.check_datalog_list()
    if error_in_data_log_list:
        raise TestExecError("Error in datalog list")

    # check for data log markers. ref: https://bugzilla.redhat.com/show_bug.cgi?id=1831798#c22
    data_log_marker = reusable.check_datalog_marker()
    log.info(f"The data_log_marker is: {data_log_marker}")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
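
# A hedged sketch of the datalog check called above; check_datalog_list is
# assumed to scan `radosgw-admin datalog list` output for errors, mirroring
# the `radosgw-admin bucket radoslist | grep ERROR` pattern used elsewhere.
def sketch_check_datalog_list():
    out = utils.exec_shell_cmd("radosgw-admin datalog list")
    # treat a failed command or any ERROR entry as a datalog problem
    return out is False or "ERROR" in str(out)
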
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()

        user_id = each_user["user_id"]
        # Creating a seed for multi-factor authentication
        log.info("Creating a seed for multi-factor authentication")
        cmd = "head -10 /dev/urandom | sha512sum | cut -b 1-30"
        SEED = utils.exec_shell_cmd(cmd)
        log.info(
            "Configure the one-time password generator oathtool and the back-end MFA system to use the same seed."
        )
        test_totp = reusable.generate_totp(SEED)
        serial = "MFAtest" + str(random.randrange(1, 100))
        if test_totp is False:
            raise TestExecError(
                "Failed to configure one-time password generator - oathtool")

        if config.test_ops["mfa_create"] is True:
            log.info("Create a new MFA TOTP token")
            cmd = f"time radosgw-admin mfa create --uid={user_id} --totp-serial={serial} --totp-seed={SEED}"
            mfa_create = utils.exec_shell_cmd(cmd)
            if mfa_create is False:
                raise TestExecError("Failed to create new MFA TOTP token!")

        # Verify no crash is seen with incorrect syntax for the mfa resync command. BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1947862
        if config.test_ops.get("mfa_resync_invalid_syntax") is True:
            log.info(
                "Validate the mfa resync command errors out with an appropriate message on invalid syntax"
            )
            get_totp = reusable.generate_totp(SEED)
            cmd = f"radosgw-admin mfa resync --uid {user_id} --totp-serial={serial} --totp-seed={SEED} --totp-pin={get_totp}"
            mfa_resync_invalid_syntax = utils.exec_shell_cmd(cmd)
            if mfa_resync_invalid_syntax is False:
                log.info("appropriate usage message displayed")
            else:
                raise TestExecError("Usage message not displayed")

        if config.test_ops["mfa_check"] is True:
            log.info(
                "Test a multi-factor authentication (MFA) time-based one time password (TOTP) token."
            )
            get_totp = reusable.generate_totp(SEED)
            cmd = f"time radosgw-admin mfa check --uid={user_id} --totp-serial={serial} --totp-pin={get_totp}"
            mfa_check = utils.exec_shell_cmd(cmd)
            if mfa_check is False:
                log.info(
                    "Resynchronize a multi-factor authentication TOTP token in case of time skew or failed checks."
                )
                previous_pin = reusable.generate_totp(SEED)
                log.info("Sleep of 30 seconds to fetch another totp")
                time.sleep(30)
                current_pin = reusable.generate_totp(SEED)
                cmd = "time radosgw-admin mfa resync --uid {user_id} --totp-serial {serial} --totp-pin {get_totp} --totp-pin {get_totp}"
                mfa_resync = utils.exec_shell_cmd(cmd)
                if mfa_resync is False:
                    raise TestExecError("Failed to resync token")
                log.info(
                    "Verify the token was successfully resynchronized by testing a new PIN"
                )
                get_totp = reusable.generate_totp(SEED)
                cmd = f"time radosgw-admin mfa check --uid {user_id} --totp-serial {serial} --totp-pin {get_totp}"
                mfa_check_resync = utils.exec_shell_cmd(cmd)
                if "ok" not in mfa_check_resync:
                    raise TestExecError("Failed to verify resync token")

        if config.test_ops["mfa_list"] is True:
            log.info("List MFA TOTP tokens")
            cmd = f"radosgw-admin mfa list --uid {user_id}"
            mfa_list = utils.exec_shell_cmd(cmd)
            if "MFAtest" in mfa_list:
                log.info("MFA token is listed for the given user")

        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_mfa_version", False):
                    log.info("enable bucket versioning and MFA deletes")

                    token, status = reusable.enable_mfa_versioning(
                        bucket, rgw_conn, SEED, serial, each_user,
                        write_bucket_io_info)
                    if status is False:
                        log.info(
                            "trying again! AccessDenied could be a timing issue!"
                        )
                        new_token = reusable.generate_totp(SEED)
                        if token == new_token:
                            log.info(
                                "sleep of 30secs to generate another TOTP token"
                            )
                            time.sleep(30)
                        status = reusable.enable_mfa_versioning(
                            bucket,
                            rgw_conn,
                            SEED,
                            serial,
                            each_user,
                            write_bucket_io_info,
                        )
                        if status is False:
                            raise MFAVersionError(
                                "Failed to enable MFA and versioning on the bucket!"
                            )

                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info(
                        f"top level s3 objects to create: {config.objects_count}"
                    )
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info(f"s3 object name: {s3_object_name}")
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info(f"s3 object path: {s3_object_path}")
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        objects_created_list.append(
                            (s3_object_name, s3_object_path))

                        # deleting the local file created after upload
                        if config.local_file_delete is True:
                            log.info(
                                "deleting local file created after the upload")
                            cmd = f"rm -rf {s3_object_path}"
                            utils.exec_shell_cmd(cmd)

                        # bucket list to check the objects
                        cmd = f"radosgw-admin bucket list --bucket {bucket_name_to_create}"
                        bucket_list = utils.exec_shell_cmd(cmd)

                if config.test_ops["delete_mfa_object"] is True:
                    for s3_object_name, path in objects_created_list:
                        log.info(
                            "Deleting an object in an MFA-enabled bucket requires a TOTP token"
                        )
                        versions = bucket.object_versions.filter(
                            Prefix=s3_object_name)
                        for version in versions:
                            log.info(
                                f"key_name: {version.object_key} --> version_id: {version.version_id}"
                            )
                            log.info("Deleting the object with TOTP token")
                            get_totp = reusable.generate_totp(SEED)
                            cmd = f"radosgw-admin object rm --object {version.object_key} --object-version {version.version_id} --totp-pin {get_totp} --bucket {bucket_name_to_create}"
                            object_delete = utils.exec_shell_cmd(cmd)
                            if object_delete is False:
                                raise TestExecError(
                                    "Object deletion with MFA token failed!")

                            log.info("Verify object is deleted permanently")
                            cmd = f"radosgw-admin bucket list --bucket {bucket_name_to_create}"
                            bucket_list = utils.exec_shell_cmd(cmd)
                            if version.version_id in bucket_list:
                                raise TestExecError(
                                    "Object version is still present, Failed to delete the object"
                                )

                if config.test_ops["delete_bucket"] is True:
                    log.info(f"Deleting the bucket {bucket_name_to_create}")
                    time.sleep(10)
                    reusable.delete_bucket(bucket)

                if config.test_ops["remove_mfa"] is True:
                    log.info(
                        "Delete a multi-factor authentication (MFA) time-based one time password (TOTP) token"
                    )
                    cmd = f"radosgw-admin mfa remove --uid {user_id} --totp-serial {serial}"
                    mfa_remove = utils.exec_shell_cmd(cmd)
                    if mfa_remove is False:
                        raise TestExecError("MFA delete failed")
                    log.info("Verify the MFA token is deleted")
                    cmd = (
                        f"radosgw-admin mfa get --uid {user_id} --totp-serial {serial}"
                    )
                    mfa_get = utils.exec_shell_cmd(cmd)
                    if mfa_get is False:
                        log.info("MFA token successfully deleted for user")
                    else:
                        raise TestExecError("MFA token delete for user failed")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops['sharding']['enable'] is True:
            log.info('enabling sharding on buckets')
            max_shards = config.test_ops['sharding']['max_shards']
            log.info('making changes to ceph.conf')
            ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_override_bucket_index_max_shards,
                                       str(max_shards))
            log.info('trying to restart services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        if config.test_ops['compression']['enable'] is True:
            compression_type = config.test_ops['compression']['type']
            log.info('enabling compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=%s' % (zone, compression_type)
            out = utils.exec_shell_cmd(cmd)
            try:
                data = json.loads(out)
                if data['placement_pools'][0]['val']['storage_classes']['STANDARD']['compression_type'] == compression_type:
                    log.info('Compression enabled successfully')
                else:
                    raise ValueError('failed to enable compression')
            except ValueError as e:
                exit(str(e))
            log.info('trying to restart rgw services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)

        # create buckets
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('upload_type') == 'multipart':
                            log.info('upload type: multipart')
                            reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                                            each_user)
                        else:
                            log.info('upload type: normal')
                            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' % s3_object_download_path)
                            log.info('downloading to filename: %s' % s3_object_download_name)
                            if config.test_ops.get('encryption_algorithm', None) is not None:
                                log.info('encryption download')
                                log.info('encryption algorithm: %s' % config.test_ops['encryption_algorithm'])
                                object_downloaded_status = bucket.download_file(s3_object_name,
                                                                                s3_object_download_path,
                                                                                ExtraArgs={
                                                                                    'SSECustomerKey': encryption_key,
                                                                                    'SSECustomerAlgorithm':
                                                                                        config.test_ops[
                                                                                            'encryption_algorithm']})
                            else:
                                object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                              'resource': 'download_file',
                                                                              'args': [s3_object_name,
                                                                                       s3_object_download_path],
                                                                              })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
                            log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                            log.info('s3_object_uploaded_md5: %s' % s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(s3_object_downloaded_md5):
                                log.info('md5 match')
                                utils.exec_shell_cmd('rm -rf %s' % s3_object_download_path)
                            else:
                                raise TestExecError('md5 mismatch')
                        if config.local_file_delete is True:
                            log.info('deleting local file created after the upload')
                            utils.exec_shell_cmd('rm -rf %s' % s3_object_path)
                    # verification of shards after upload
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s | grep bucket_id' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace('"', '').strip().split(":")[1].strip().replace(',', '')
                        cmd2 = 'rados -p default.rgw.buckets.index ls | grep %s' % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info('got output from sharding verification')
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops['compression']['enable'] is True:
                        cmd = 'radosgw-admin bucket stats --bucket=%s' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops['delete_bucket_object'] is True:
                        reusable.delete_objects(bucket)
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops['compression']['enable'] is True:
            log.info('disable compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=none' % zone
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status('test failed')
                sys.exit(1)
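
# A hedged sketch of the compression verification the bucket-stats comments
# above point at: with compression active, size_kb_utilized (space consumed on
# the data pool) should stay below size_kb_actual (logical object size).
def sketch_compression_effective(bucket_name):
    out = utils.exec_shell_cmd('radosgw-admin bucket stats --bucket=%s' % bucket_name)
    usage = json.loads(out)['usage']['rgw.main']
    return usage['size_kb_utilized'] < usage['size_kb_actual']
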
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=%s" %
                   (zone, compression_type))
            out = utils.exec_shell_cmd(cmd)
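            # token index 4 of the "ceph version" output is the release codename
            # (e.g. "nautilus"), which decides where compression appears in the zone JSON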
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (data["placement_pools"][0]["val"]["compression"] ==
                            compression_type):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (data["placement_pools"][0]["val"]
                            ["storage_classes"]["STANDARD"]["compression_type"]
                                == compression_type):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" %
                                     s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info("s3_object_download_path: %s" %
                                     s3_object_download_path)
                            log.info("downloading to filename: %s" %
                                     s3_object_download_name)
                            if (config.test_ops.get("encryption_algorithm",
                                                    None) is not None):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s" %
                                    config.test_ops["encryption_algorithm"])
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey":
                                        encryption_key,
                                        "SSECustomerAlgorithm":
                                        config.
                                        test_ops["encryption_algorithm"],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    "obj":
                                    bucket,
                                    "resource":
                                    "download_file",
                                    "args": [
                                        s3_object_name,
                                        s3_object_download_path,
                                    ],
                                })
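                            # boto3 download_file returns None on success, while the
                            # s3lib.resource_op wrapper returns False when the wrapped
                            # call raises, hence the two separate checks below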
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(
                                s3_object_path)
                            log.info("s3_object_downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("s3_object_uploaded_md5: %s" %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info("md5 match")
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info(
                                "deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    # verification of shards after upload
                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (out.replace(
                            '"',
                            "").strip().split(":")[1].strip().replace(",", ""))
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharding verification")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        log.info(
                            "set debug_rgw to 20 before delete the bucket")
                        config.debug_rgw = 20
                        ceph_conf.set_to_ceph_conf("global",
                                                   ConfigOpts.debug_rgw,
                                                   str(config.debug_rgw))
                        log.info("trying to restart services")
                        srv_restarted = rgw_service.restart()
                        time.sleep(20)
                        if srv_restarted is False:
                            raise TestExecError("RGW service restart failed")
                        else:
                            log.info("RGW service restarted")
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=none" %
                   zone)
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #8
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    if config.dbr_scenario == "brownfield":
        user_brownfield = "brownfield_user"
        all_users_info = s3lib.create_users(config.user_count, user_brownfield)
    else:
        all_users_info = s3lib.create_users(config.user_count)

    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=%s"
                % (zone, compression_type)
            )
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (
                        data["placement_pools"][0]["val"]["compression"]
                        == compression_type
                    ):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (
                            data["placement_pools"][0]["val"]["storage_classes"][
                                "STANDARD"
                            ]["compression_type"]
                            == compression_type
                        ):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)
        if config.dynamic_resharding is True:
            if utils.check_dbr_support():
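                # a low rgw_max_objs_per_shard makes dynamic resharding trigger
                # with a small object count, so the reshard can be observed in-test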
                log.info("making changes to ceph.conf")
                ceph_conf.set_to_ceph_conf(
                    "global",
                    ConfigOpts.rgw_max_objs_per_shard,
                    str(config.max_objects_per_shard),
                )
                srv_restarted = rgw_service.restart()

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                if config.bucket_sync_crash is True:
                    is_primary = utils.is_cluster_primary()
                    if is_primary:
                        bucket_name_to_create = "bkt_crash_check"
                if config.dbr_scenario == "brownfield":
                    bucket_name_to_create = "brownfield_bucket"

                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
                if config.dynamic_resharding is True:
                    reusable.check_sync_status()
                    op = utils.exec_shell_cmd(
                        f"radosgw-admin bucket stats --bucket {bucket.name}"
                    )
                    json_doc = json.loads(op)
                    old_num_shards = json_doc["num_shards"]
                    log.info(f"no_of_shards_created: {old_num_shards}")
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    if utils.check_dbr_support():
                        if bucket_name_to_create == "brownfield_bucket":
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            if bool(json_doc["usage"]):
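                                # the brownfield bucket already holds objects, so
                                # scale up the target object count and rebuild the
                                # mapped sizes before uploading on top of them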
                                num_object = json_doc["usage"]["rgw.main"][
                                    "num_objects"
                                ]
                                config.objects_count = (
                                    num_object * 2 + config.objects_count
                                )
                                config.mapped_sizes = utils.make_mapped_sizes(config)

                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name
                            )
                            log.info(
                                "s3_object_download_path: %s" % s3_object_download_path
                            )
                            log.info(
                                "downloading to filename: %s" % s3_object_download_name
                            )
                            if (
                                config.test_ops.get("encryption_algorithm", None)
                                is not None
                            ):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s"
                                    % config.test_ops["encryption_algorithm"]
                                )
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey": encryption_key,
                                        "SSECustomerAlgorithm": config.test_ops[
                                            "encryption_algorithm"
                                        ],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op(
                                    {
                                        "obj": bucket,
                                        "resource": "download_file",
                                        "args": [
                                            s3_object_name,
                                            s3_object_download_path,
                                        ],
                                    }
                                )
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path
                            )
                            s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
                            log.info(
                                "s3_object_downloaded_md5: %s"
                                % s3_object_downloaded_md5
                            )
                            log.info(
                                "s3_object_uploaded_md5: %s" % s3_object_uploaded_md5
                            )
                            if str(s3_object_uploaded_md5) == str(
                                s3_object_downloaded_md5
                            ):
                                log.info("md5 match")
                                utils.exec_shell_cmd(
                                    "rm -rf %s" % s3_object_download_path
                                )
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    if config.bucket_sync_crash is True:
                        is_primary = utils.is_cluster_primary()
                        if is_primary is False:
                            crash_info = reusable.check_for_crash()
                            if crash_info:
                                raise TestExecError("ceph daemon crash found!")
                            realm, source_zone = utils.get_realm_source_zone_info()
                            log.info(f"Realm name: {realm}")
                            log.info(f"Source zone name: {source_zone}")
                            for i in range(600):  # Running sync command for 600 times
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket sync run --bucket bkt_crash_check --rgw-curl-low-speed-time=0 --source-zone {source_zone} --rgw-realm {realm}"
                                )
                                crash_info = reusable.check_for_crash()
                                if crash_info:
                                    raise TestExecError("ceph daemon crash found!")
                                time.sleep(1)
                    if config.dynamic_resharding is True:
                        if utils.check_dbr_support():
                            reusable.check_sync_status()
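                            # poll bucket stats for up to ~10 minutes; the for/else
                            # raises only if num_shards never increased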
                            for i in range(10):
                                time.sleep(
                                    60
                                )  # Adding delay for processing reshard list
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket stats --bucket {bucket.name}"
                                )
                                json_doc = json.loads(op)
                                new_num_shards = json_doc["num_shards"]
                                log.info(f"no_of_shards_created: {new_num_shards}")
                                if new_num_shards > old_num_shards:
                                    break
                            else:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    if config.manual_resharding is True:
                        if utils.check_dbr_support():
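                            # queue a reshard to config.shards, force processing,
                            # then confirm the shard count actually increased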
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            old_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {old_num_shards}")
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard add --bucket {bucket.name} --num-shards {config.shards}"
                            )
                            op = utils.exec_shell_cmd("radosgw-admin reshard process")
                            time.sleep(60)
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            new_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {new_num_shards}")
                            if new_num_shards <= old_num_shards:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    # verification of shards after upload
                    if config.test_datalog_trim_command is True:
                        shard_id, end_marker = reusable.get_datalog_marker()
                        cmd = f"sudo radosgw-admin datalog trim --shard-id {shard_id} --end-marker {end_marker} --debug_ms=1 --debug_rgw=20"
                        out, err = utils.exec_shell_cmd(cmd, debug_info=True)
                        if "Segmentation fault" in err:
                            raise TestExecError("Segmentation fault occured")

                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name
                        )
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (
                            out.replace('"', "")
                            .strip()
                            .split(":")[1]
                            .strip()
                            .replace(",", "")
                        )
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharing verification.--------")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        time.sleep(10)
                        reusable.check_sync_status()
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=none" % zone
            )
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    if config.test_ops.get("upload_type") == "multipart":
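        # capture the RGW service uptime before the multipart uploads; a crash and
        # restart would make the post-upload reading lower than this one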
        srv_time_pre_op = get_svc_time()

    # create user
    tenant1 = "tenant_" + random.choice(string.ascii_letters)
    tenant1_user_info = s3lib.create_tenant_users(tenant_name=tenant1,
                                                  no_of_users_to_create=2)
    tenant1_user1_info = tenant1_user_info[0]
    tenant1_user2_info = tenant1_user_info[1]

    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant1_user2_auth = Auth(tenant1_user2_info, ssl=config.ssl)

    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant1_user2 = tenant1_user2_auth.do_auth()
    rgw_tenant1_user2_c = tenant1_user2_auth.do_auth_using_client()

    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant1_user2_info["user_id"]],
        actions_list=["ListBucketMultiPartUploads"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj":
        bucket_policy_obj,
        "resource":
        "put",
        "kwargs":
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")

    if config.test_ops.get("upload_type") == "multipart":
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            for bucket in [t1_u1_bucket1, t1_u1_bucket2]:
                s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                log.info("s3 objects to create: %s" % config.objects_count)
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    tenant1_user1_info,
                )
        srv_time_post_op = get_svc_time()
        log.info(srv_time_pre_op)
        log.info(srv_time_post_op)

        if srv_time_post_op > srv_time_pre_op:
            log.info("Service is running without crash")
        else:
            raise TestExecError("Service got crashed")

    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])

    # List multipart uploads with tenant1_user2 user with bucket t1_u1_bucket1
    multipart_object1 = rgw_tenant1_user2_c.list_multipart_uploads(
        Bucket=t1_u1_bucket1.name)
    log.info("Multipart object %s" % multipart_object1)

    # Verify tenant1_user2 not having permission for listing multipart uploads in t1_u1_bucket2
    try:
        multipart_object2 = rgw_tenant1_user2_c.list_multipart_uploads(
            Bucket=t1_u1_bucket2.name)
        raise Exception(
            "user %s should not be able to list multipart uploads in bucket: %s" %
            (tenant1_user2_info["user_id"], t1_u1_bucket2.name))
    except ClientError as err:
        log.info("Listing failed as expected with exception: %s" % err)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info('no of buckets to create: %s' % config.bucket_count)
    # create buckets
    if config.test_ops['create_bucket'] is True:
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, user_info)
            if config.test_ops['create_object'] is True:
                # uploading data
                log.info('s3 objects to create: %s' % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, oc)
                    log.info('s3 object name: %s' % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    log.info('s3 object path: %s' % s3_object_path)
                    if config.test_ops.get('upload_type') == 'multipart':
                        log.info('upload type: multipart')
                        reusable.upload_mutipart_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                                        user_info)
                    else:
                        log.info('upload type: normal')
                        reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)

                    if config.gc_verification is True:
                        log.info('making changes to ceph.conf')
                        config.rgw_gc_obj_min_wait = 5
                        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,
                                                   str(config.rgw_gc_obj_min_wait))
                        log.info('trying to restart services')
                        srv_restarted = rgw_service.restart()
                        time.sleep(30)
                        if srv_restarted is False:
                            raise TestExecError("RGW service restart failed")
                        else:
                            log.info('RGW service restarted')
                        log.info('download the large object again to populate gc list with shadow entries')
                        reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config)
                        time.sleep(60)
                        gc_list_output = json.loads(utils.exec_shell_cmd("radosgw-admin gc list --include-all"))

                        log.info(gc_list_output)
                        
                        if gc_list_output:
                            log.info("Shadow objects found after setting the rgw_gc_obj_min_wait to 5 seconds")
                            utils.exec_shell_cmd("radosgw-admin gc process")
                            log.info('Object download should not fail with a 404 NoSuchKey error')
                            reusable.download_object(s3_object_name, bucket, TEST_DATA_PATH, s3_object_path, config)

        reusable.remove_user(user_info)
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    # Adding caps for user1
    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    # user1 auth with iam_client
    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    # policy document
    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])
    print(policy_document)

    # role policy
    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")
    print(role_policy)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    # role creation happens here
    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    # Put role policy happening here
    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    # bucket creation operations now
    bucket_name = "testbucket" + user1["user_id"]

    # authenticating user1 for bucket creation operation
    auth = Auth(user1, ssl=config.ssl)
    user1_info = {
        "access_key": user1["access_key"],
        "secret_key": user1["secret_key"],
        "user_id": user1["user_id"],
    }
    s3_client_u1 = auth.do_auth()

    # bucket creation operation
    bucket = reusable.create_bucket(bucket_name, s3_client_u1, user1_info)

    # uploading objects to the bucket
    if config.test_ops["create_object"]:
        # uploading data
        log.info("s3 objects to create: %s" % config.objects_count)
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
            log.info("s3 object name: %s" % s3_object_name)
            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
            log.info("s3 object path: %s" % s3_object_path)
            if config.test_ops.get("upload_type") == "multipart":
                log.info("upload type: multipart")
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )
            else:
                log.info("upload type: normal")
                reusable.upload_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )
    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }
    log.info("got the credentials after assume role")

    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client = s3client.do_auth_using_client()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)

    unexisting_object = bucket_name + "_unexisting_object"
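    # HeadObject on a missing key should come back as 404 when the assumed-role
    # credentials have access to the bucket; a 403 would mean the role policy
    # did not take effect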
    try:
        response = s3_client.head_object(Bucket=bucket_name, Key=unexisting_object)
    except botocore.exceptions.ClientError as e:
        response_code = e.response["Error"]["Code"]
        log.info(response_code)
        if e.response["Error"]["Code"] == "404":
            log.info("404 Unexisting Object Not Found")
        elif e.response["Error"]["Code"] == "403":
            raise TestExecError("Error code : 403 - HeadObject operation: Forbidden")
Example #12
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_sts_key,
                                     session_encryption_token)
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts,
                                     "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1)
    iam_client = auth.do_auth_iam_client()
    """
    TODO:
    policy_document and role_policy can be used valid dict types.
    need to explore on this. 
    """

    policy_document = (
        '{"Version":"2012-10-17",'
        '"Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/%s"]},'
        '"Action":["sts:AssumeRole"]}]}' % (user2["user_id"]))

    role_policy = ('{"Version":"2012-10-17",'
                   '"Statement":{"Effect":"Allow",'
                   '"Action":"s3:*",'
                   '"Resource":"arn:aws:s3:::*"}}')

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.
        format(user_id=user1["user_id"]))
    utils.exec_shell_cmd(add_caps_cmd)

    # log.info(policy_document)
    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        })
    write_user_info.add_user_info(user_info)

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            bucket = reusable.create_bucket(bucket_name_to_create,
                                            s3_client_rgw,
                                            assumed_role_user_info)
            if config.test_ops["create_object"] is True:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH,
                                                  s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    if config.test_ops.get("upload_type") == "multipart":
        srv_time_pre_op = get_svc_time()

    # create user
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    for each_user in tenant1_user_info:
        tenant1_user1_information = each_user
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj":
        bucket_policy_obj,
        "resource":
        "put",
        "kwargs":
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")

    if config.test_ops.get("upload_type") == "multipart":
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            s3_object_name = utils.gen_s3_object_name(t1_u1_bucket1.name, oc)
            log.info("s3 objects to create: %s" % config.objects_count)
            reusable.upload_mutipart_object(
                s3_object_name,
                t1_u1_bucket1,
                TEST_DATA_PATH,
                config,
                tenant1_user1_information,
            )
        srv_time_post_op = get_svc_time()
        log.info(srv_time_pre_op)
        log.info(srv_time_post_op)

        if srv_time_post_op > srv_time_pre_op:
            log.info("Service is running without crash")
        else:
            raise TestExecError("Service got crashed")

    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying buckey policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj":
            bucket_policy_obj,
            "resource":
            "put",
            "kwargs":
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0][
            "Action"]
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info("cleaned_actions_list_from_modified_policy: %s" %
                 cleaned_actions_list_from_modified_policy)
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True,
                           Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("new bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: delete bucket policy failed")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)

    # check sync status if this is a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
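    # rgw_lc_debug_interval shrinks the lifecycle "day" to the given number of
    # seconds (1s here) so expiration rules take effect almost immediately;
    # rgw_lifecycle_work_time of "00:00-23:59" keeps the processing window
    # open all day.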
    config.rgw_lc_debug_interval = 1
    config.rgw_lifecycle_work_time = "00:00-23:59"
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    rgw_service.status()
    # create user
    user_info = s3lib.create_users(config.user_count)
    for each_user in user_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        if config.test_ops["create_bucket"]:
            log.info("no of buckets to create: %s" % config.bucket_count)
            # create bucket
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=1)
                bucket = reusable.create_bucket(bucket_name, rgw_conn,
                                                each_user)
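                # Apply the lifecycle rules supplied by the test config. An
                # illustrative (assumed) shape of config.lifecycle_conf:
                #   [{"ID": "LC_Rule_1",
                #     "Filter": {"Prefix": "key"},
                #     "Status": "Enabled",
                #     "Expiration": {"Days": 1}}]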
                life_cycle_rule = {"Rules": config.lifecycle_conf}
                reusable.put_bucket_lifecycle(bucket, rgw_conn, rgw_conn2,
                                              life_cycle_rule)
                if config.test_ops["create_object"]:
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        log.info(
                            f"s3 objects to create of size {config.obj_size}")
                        s3_object_name = config.lifecycle_conf[0]["Filter"][
                            "Prefix"] + str(oc)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info(
                            f"s3 object path: {s3_object_path}, name: {s3_object_name}"
                        )
                        reusable.upload_mutipart_object(
                            s3_object_name, bucket, TEST_DATA_PATH, config,
                            each_user)

                # poll bucket stats once a minute, up to 100 times, until
                # lifecycle expiration has removed every object from the bucket
                for _ in range(100):
                    time.sleep(60)
                    bucket_details = json.loads(
                        utils.exec_shell_cmd(
                            f"radosgw-admin bucket stats --bucket={bucket.name}"
                        ))
                    if bucket_details["usage"]["rgw.main"]["num_objects"] == 0:
                        break
                else:
                    raise TestExecError(
                        "Bucket object expiration taking longer than expected")

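                # Expired objects can leave shadow/tail objects behind until
                # RGW garbage collection runs; process gc explicitly so the
                # data pool check below is not polluted by pending entries.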
                gc_list_output = json.loads(
                    utils.exec_shell_cmd(
                        "radosgw-admin gc list --include-all"))
                if gc_list_output:
                    log.info("Removing shadow objects found")
                    utils.exec_shell_cmd(
                        "radosgw-admin gc process --include-all")

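                # RADOS objects in the data pool carry the bucket id plus the
                # rule prefix in their names; grep for that to find leftovers
                # and confirm they are no longer downloadable through S3.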
                bucket_id = (bucket_details["id"] + "_" +
                             config.lifecycle_conf[0]["Filter"]["Prefix"])
                log.info(
                    f"check for all the entry {bucket_id} for the bucket in data pool"
                )
                obj_pool = utils.exec_shell_cmd(
                    f"rados ls -p default.rgw.buckets.data | grep {bucket_id}")
                if obj_pool:
                    # exec_shell_cmd returns the command output as a single
                    # string; iterate over the individual rados object names
                    for obj in obj_pool.strip().split("\n"):
                        object_name = obj.split("_")[-1]
                        log.info(f"s3 object name to download: {object_name}")
                        object_name_downloaded = object_name + "." + "download"
                        object_download_path = os.path.join(
                            TEST_DATA_PATH, object_name_downloaded)
                        object_downloaded_status = s3lib.resource_op({
                            "obj": bucket,
                            "resource": "download_file",
                            "args": [object_name, object_download_path],
                        })
                        if object_downloaded_status is False:
                            log.info("As expected, the expired object is not downloadable")
                        if object_downloaded_status is None:
                            raise TestExecError(
                                "Expired object is no longer listed but is still downloadable"
                            )

                if config.local_file_delete:
                    log.info("deleting local file created after the upload")
                    utils.exec_shell_cmd(f"rm -rf {TEST_DATA_PATH}")

                reusable.delete_bucket(bucket)
        reusable.remove_user(each_user)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
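    # When an encryption algorithm is requested, allow SSE over plain HTTP by
    # disabling rgw_crypt_require_ssl, then restart RGW to pick up the change.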
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    if config.test_ops['object_structure'] == 'flat':
                        # uploading data
                        log.info('top level s3 objects to create: %s' %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info('s3 object name: %s' % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info('s3 object path: %s' % s3_object_path)
                            if config.test_ops.get(
                                    'upload_type') == 'multipart':
                                log.info('upload type: multipart')
                                reusable.upload_mutipart_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                            else:
                                log.info('upload type: normal')
                                reusable.upload_object(s3_object_name, bucket,
                                                       TEST_DATA_PATH, config,
                                                       each_user)
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            #deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    'deleting local file created after the upload'
                                )
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_path)

                    # This covers listing of a bucket with pseudo directories and objects in it.
                    # Ref: "Unable to list contents of large buckets", https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops['object_structure'] == 'pseudo':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info('s3 object name: %s' % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info('s3 object path: %s' % s3_object_path)
                                if config.test_ops.get(
                                        'upload_type') == 'multipart':
                                    log.info('upload type: multipart')
                                    reusable.upload_mutipart_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                else:
                                    log.info('upload type: normal')
                                    reusable.upload_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                #deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        'deleting local file created after the upload'
                                    )
                                    utils.exec_shell_cmd('rm -rf %s' %
                                                         s3_object_path)

                # Listing a bucket with only pseudo directories.
                # Ref: "Bug allows ordered bucket listing to get stuck -- 4.1", https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops['create_object'] is False:
                    if config.test_ops[
                            'object_structure'] == 'pseudo-dir-only':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count}'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops['radoslist'] is True:
                    log.info(
                        'executing the command radosgw-admin bucket radoslist '
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # read the effective rgw_bucket_index_max_aio value from the
                # running RGW daemon via its admin socket
                cmd = 'ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show | grep rgw_bucket_index_max_aio'
                max_aio_output = utils.exec_shell_cmd(cmd)
                max_aio = max_aio_output.split()[1]

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json['usage']['rgw.main'][
                    'num_objects']

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    'measure the execution time taken to list via radosgw-admin command'
                )
                if config.test_ops['radosgw_listing_ordered'] is True:
                    log.info('ordered listing via radosgw-admin command')
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'ordered')
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops['radosgw_listing_ordered'] is False:
                    log.info('unordered listing via radosgw-admin command')
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'unordered')
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info('measure the execution time taken to list via boto')
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f'with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins'
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        if config.test_ops.get('delete_bucket_object', False):
            if config.test_ops.get('enable_version', False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)