def test_exec(config):
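    """
    Test bucket notifications with a Kafka endpoint: create users and buckets,
    create an SNS topic for the configured endpoint and ack type, attach a
    bucket notification for the configured event type, generate object
    create/copy/delete events, then start a Kafka consumer and verify the
    received event records before cleaning up the topic on the broker.
    """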

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()

        # authenticate sns client.
        rgw_sns_conn = auth.do_auth_sns_client()

        # authenticate with s3 client
        rgw_s3_client = auth.do_auth_using_client()

        # get ceph version
        ceph_version_id, ceph_version_name = utils.get_ceph_version()

        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)

                # create topic with endpoint
                if config.test_ops["create_topic"] is True:
                    endpoint = config.test_ops.get("endpoint")
                    ack_type = config.test_ops.get("ack_type")
                    topic_id = str(uuid.uuid4().hex[:16])
                    persistent = False
                    topic_name = "cephci-kafka-" + ack_type + "-ack-type-" + topic_id
                    log.info(
                        f"creating a topic with {endpoint} endpoint with ack type {ack_type}"
                    )
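                    # a persistent topic asks RGW to queue events and retry
                    # delivery until the endpoint acknowledges them (assumption
                    # based on the persistent_flag option)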
                    if config.test_ops.get("persistent_flag", False):
                        log.info("topic with peristent flag enabled")
                        persistent = config.test_ops.get("persistent_flag")
                    topic = notification.create_topic(rgw_sns_conn, endpoint,
                                                      ack_type, topic_name,
                                                      persistent)

                # get topic attributes
                if config.test_ops.get("get_topic_info", False):
                    log.info("get topic attributes")
                    get_topic_info = notification.get_topic(
                        rgw_sns_conn, topic, ceph_version_name)

                # put bucket notification with topic configured for event
                if config.test_ops["put_get_bucket_notification"] is True:
                    event = config.test_ops.get("event_type")
                    notification_name = "notification-" + str(event)
                    notification.put_bucket_notification(
                        rgw_s3_client,
                        bucket_name_to_create,
                        notification_name,
                        topic,
                        event,
                    )

                    # get bucket notification
                    log.info(
                        f"get bucket notification for bucket : {bucket_name_to_create}"
                    )
                    notification.get_bucket_notification(
                        rgw_s3_client, bucket_name_to_create)

                # create objects
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        objects_created_list.append(
                            (s3_object_name, s3_object_path))
                # copy objects
                if config.test_ops.get("copy_object", False):
                    log.info("copy object")
                    status = rgw_s3_client.copy_object(
                        Bucket=bucket_name_to_create,
                        Key="copy_of_object" + s3_object_name,
                        CopySource={
                            "Bucket": bucket_name_to_create,
                            "Key": s3_object_name,
                        },
                    )
                    if status is None:
                        raise TestExecError("copy object failed")

            # delete objects
            if config.test_ops.get("delete_bucket_object", False):
                if config.test_ops.get("enable_version", False):
                    for name, path in objects_created_list:
                        reusable.delete_version_object(bucket, name, path,
                                                       rgw_conn, each_user)
                else:
                    reusable.delete_objects(bucket)

            # start kafka broker and consumer
            event_record_path = "/home/cephuser/event_record"
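            # the consumer writes every notification it receives to this file,
            # which verify_event_record() parses below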
            start_consumer = notification.start_kafka_broker_consumer(
                topic_name, event_record_path)
            if start_consumer is False:
                raise TestExecError("Kafka consumer not running")

            # verify all the attributes of the event record; if the event is not received, abort the test case
            log.info("verify event record attributes")
            verify = notification.verify_event_record(event,
                                                      bucket_name_to_create,
                                                      event_record_path,
                                                      ceph_version_name)
            if verify is False:
                raise EventRecordDataError(
                    "Event record is empty! notification is not seen")

        # delete topic logs on kafka broker
        notification.del_topic_from_kafka_broker(topic_name)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
# Example #2
def test_exec(config):
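    """
    Test S3 bucket policies across tenants: create one user in each of two
    tenants, create two buckets for the first tenant's user, attach a policy
    that grants the second tenant's user access to the first bucket, and then
    modify, replace or delete that policy based on config.bucket_policy_op.
    """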

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
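    # generate a policy that grants tenant2's user the CreateBucket action on
    # tenant1's first bucket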
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj": bucket_policy_obj,
        "resource": "put",
        "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: put bucket policy failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying buckey policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0][
            "Action"]
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info("cleaned_actions_list_from_modified_policy: %s" %
                 cleaned_actions_list_from_modified_policy)
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: put bucket policy failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("new bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: delete bucket policy failed")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
# Example #3
def test_exec(config):
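    """
    Measure bucket listing at scale: create buckets filled with flat or
    pseudo-directory object layouts (optionally relaxing rgw_crypt_require_ssl
    when an encryption algorithm is configured), optionally run radosgw-admin
    bucket radoslist, then time ordered/unordered listings via radosgw-admin
    and via boto and report them against rgw_bucket_index_max_aio.
    """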

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops["create_object"] is True:
                    if config.test_ops["object_structure"] == "flat":
                        # uploading data
                        log.info("top level s3 objects to create: %s" %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info("s3 object name: %s" % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info("s3 object path: %s" % s3_object_path)
                            if config.test_ops.get(
                                    "upload_type") == "multipart":
                                log.info("upload type: multipart")
                                reusable.upload_mutipart_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            else:
                                log.info("upload type: normal")
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    "deleting local file created after the upload"
                                )
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops["object_structure"] == "pseudo":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info("s3 object name: %s" % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info("s3 object path: %s" % s3_object_path)
                                if config.test_ops.get(
                                        "upload_type") == "multipart":
                                    log.info("upload type: multipart")
                                    reusable.upload_mutipart_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                else:
                                    log.info("upload type: normal")
                                    reusable.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        "deleting local file created after the upload"
                                    )
                                    utils.exec_shell_cmd("rm -rf %s" %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops["create_object"] is False:
                    if config.test_ops[
                            "object_structure"] == "pseudo-dir-only":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count}"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops["radoslist"] is True:
                    log.info(
                        "executing the command radosgw-admin bucket radoslist "
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter - rgw_bucket_index_max_aio
                ceph_version_id, ceph_version_name = utils.get_ceph_version()
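                # older releases expose rgw_bucket_index_max_aio only through the
                # RGW admin socket; newer releases expose it via the centralized
                # config (ceph config get)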
                if ceph_version_name in ["luminous", "nautilus"]:
                    cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.split()[1]
                else:
                    cmd = "ceph config get mon rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.rstrip("\n")

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][
                    "num_objects"]

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    "measure the execution time taken to list via radosgw-admin command"
                )
                if config.test_ops["radosgw_listing_ordered"] is True:
                    log.info("ordered listing via radosgw-admin command")
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "ordered")
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops["radosgw_listing_ordered"] is False:
                    log.info("unordered listing via radosgw-admin command")
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "unordered")
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info("measure the execution time taken to list via boto")
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins"
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265
        if config.radoslist_all is True:
            log.info(
                "Executing the command radosgw-admin bucket radoslist on all buckets"
            )
            cmd = "radosgw-admin bucket radoslist | grep ERROR"
            radoslist_all_error = utils.exec_shell_cmd(cmd)
            if radoslist_all_error:
                raise TestExecError("ERROR in radoslist command")

        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)
def test_exec(config):
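    """
    Test the datalog backing: report the default backing, optionally switch it
    (for example to FIFO) and restart the RGW daemons, run object create and
    delete workloads, then check datalog list and markers and the multisite
    sync status.
    """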

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # check the default data log backing
    default_data_log = reusable.get_default_datalog_type()
    log.info(f"{default_data_log} is the default data log backing")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        objects_created_list = []

        # change the default datalog backing to FIFO
        if config.test_ops.get("change_datalog_backing", False):
            logtype = config.test_ops["change_datalog_backing"]
            log.info(f"change default datalog backing to {logtype}")
            cmd = f"radosgw-admin datalog type --log-type={logtype}"
            change_datalog_type = utils.exec_shell_cmd(cmd)
            if change_datalog_type is False:
                raise TestExecError("Failed to change the datalog type to fifo")
            log.info(
                "restart the rgw daemons and sleep for 30 secs for the rgw daemons to come up"
            )
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(
                        bucket, rgw_conn, each_user, write_bucket_io_info
                    )
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info(
                        "top level s3 objects to create: %s" % config.objects_count
                    )
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        objects_created_list.append((s3_object_name, s3_object_path))
                        # deleting the local file created after upload
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)

        # delete object and bucket
        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(
                        bucket, name, path, rgw_conn, each_user
                    )
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check for any ERRORs in datalog list. ref- https://bugzilla.redhat.com/show_bug.cgi?id=1917687
    error_in_data_log_list = reusable.check_datalog_list()
    if error_in_data_log_list:
        raise TestExecError("Error in datalog list")

    # check for data log markers. ref: https://bugzilla.redhat.com/show_bug.cgi?id=1831798#c22
    data_log_marker = reusable.check_datalog_marker()
    log.info(f"The data_log_marker is: {data_log_marker}")

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
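    """
    Test object versioning end to end: enable versioning on a bucket, upload
    multiple versions per object, verify that metadata updates do not create
    new versions, download objects and compare md5 checksums, optionally set
    ACLs, copy an object back to an older version, delete versions (including
    attempts from a second, non-owner user), and finally suspend versioning.
    """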

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    extra_user = s3lib.create_users(1)[0]
    extra_user_auth = Auth(extra_user, ssl=config.ssl)
    extra_user_conn = extra_user_auth.do_auth()
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        s3_object_names = []
        # create buckets
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                each_user["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = s3lib.resource_op({
                "obj": rgw_conn,
                "resource": "Bucket",
                "args": [bucket_name_to_create]
            })
            # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
            created = s3lib.resource_op({
                "obj": bucket,
                "resource": "create",
                "args": None,
                "extra_info": {
                    "access_key": each_user["access_key"]
                },
            })
            if created is False:
                raise TestExecError(
                    "Resource execution failed: bucket creation faield")
            if created is not None:
                response = HttpResponseParser(created)
                if response.status_code == 200:
                    log.info("bucket created")
                else:
                    raise TestExecError("bucket creation failed")
            else:
                raise TestExecError("bucket creation failed")
            # getting bucket version object
            if config.test_ops["enable_version"] is True:
                log.info("bucket versionig test on bucket: %s" % bucket.name)
                # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                bucket_versioning = s3lib.resource_op({
                    "obj": rgw_conn,
                    "resource": "BucketVersioning",
                    "args": [bucket.name],
                })
                # checking the versioning status
                # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                version_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "status",
                    "args": None
                })
                if version_status is None:
                    log.info("bucket versioning still not enabled")
                # enabling bucket versioning
                # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                version_enable_status = s3lib.resource_op({
                    "obj": bucket_versioning,
                    "resource": "enable",
                    "args": None,
                })
                response = HttpResponseParser(version_enable_status)
                if response.status_code == 200:
                    log.info("version enabled")
                    write_bucket_io_info.add_versioning_status(
                        each_user["access_key"],
                        bucket.name,
                        VERSIONING_STATUS["ENABLED"],
                    )

                else:
                    raise TestExecError("version enable failed")
                if config.objects_count > 0:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, s3_object_size in list(
                            config.mapped_sizes.items()):
                        # versioning upload
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, str(oc))
                        s3_object_names.append(s3_object_name)
                        log.info("s3 object name: %s" % s3_object_name)
                        log.info("versioning count: %s" % config.version_count)
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, str(oc))
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        original_data_info = manage_data.io_generator(
                            s3_object_path, s3_object_size)
                        if original_data_info is False:
                            TestExecError("data creation failed")
                        created_versions_count = 0
                        for vc in range(config.version_count):
                            log.info("version count for %s is %s" %
                                     (s3_object_name, str(vc)))
                            log.info("modifying data: %s" % s3_object_name)
                            modified_data_info = manage_data.io_generator(
                                s3_object_path,
                                s3_object_size,
                                op="append",
                                **{
                                    "message":
                                    "\nhello for version: %s\n" % str(vc)
                                })
                            if modified_data_info is False:
                                TestExecError("data modification failed")
                            log.info("uploading s3 object: %s" %
                                     s3_object_path)
                            upload_info = dict(
                                {
                                    "access_key": each_user["access_key"],
                                    "versioning_status": VERSIONING_STATUS["ENABLED"],
                                    "version_count_no": vc,
                                }, **modified_data_info)
                            s3_obj = s3lib.resource_op({
                                "obj": bucket,
                                "resource": "Object",
                                "args": [s3_object_name],
                                "extra_info": upload_info,
                            })
                            object_uploaded_status = s3lib.resource_op({
                                "obj": s3_obj,
                                "resource": "upload_file",
                                "args": [modified_data_info["name"]],
                                "extra_info": upload_info,
                            })
                            if object_uploaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object upload failed"
                                )
                            if object_uploaded_status is None:
                                log.info("object uploaded")
                                s3_obj = rgw_conn.Object(
                                    bucket.name, s3_object_name)
                                log.info("current_version_id: %s" %
                                         s3_obj.version_id)
                                key_version_info = basic_io_structure.version_info(
                                    **{
                                        "version_id": s3_obj.version_id,
                                        "md5_local": upload_info["md5"],
                                        "count_no": vc,
                                        "size": upload_info["size"],
                                    })
                                log.info("key_version_info: %s" %
                                         key_version_info)
                                write_key_io_info.add_versioning_info(
                                    each_user["access_key"],
                                    bucket.name,
                                    s3_object_path,
                                    key_version_info,
                                )
                                created_versions_count += 1
                                log.info("created_versions_count: %s" %
                                         created_versions_count)
                                log.info("adding metadata")
                                metadata1 = {
                                    "m_data1": "this is the meta1 for this obj"
                                }
                                s3_obj.metadata.update(metadata1)
                                metadata2 = {
                                    "m_data2": "this is the meta2 for this obj"
                                }
                                s3_obj.metadata.update(metadata2)
                                log.info("metadata for this object: %s" %
                                         s3_obj.metadata)
                                log.info("metadata count for object: %s" %
                                         (len(s3_obj.metadata)))
                                if not s3_obj.metadata:
                                    raise TestExecError(
                                        "metadata not created even after adding metadata"
                                    )
                                versions = bucket.object_versions.filter(
                                    Prefix=s3_object_name)
                                created_versions_count_from_s3 = len(
                                    [v.version_id for v in versions])
                                log.info("created versions count on s3: %s" %
                                         created_versions_count_from_s3)
                                if (created_versions_count ==
                                        created_versions_count_from_s3):
                                    log.info(
                                        "no new versions are created when adding metadata"
                                    )
                                else:
                                    raise TestExecError(
                                        "version count missmatch, "
                                        "possible creation of version on adding metadata"
                                    )
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name + ".download")
                            object_downloaded_status = s3lib.resource_op({
                                "obj": bucket,
                                "resource": "download_file",
                                "args": [s3_object_name, s3_object_download_path],
                            })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            # checking md5 of the downloaded file
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            log.info("downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("uploaded_md5: %s" %
                                     modified_data_info["md5"])
                            # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path)
                        log.info("all versions for the object: %s\n" %
                                 s3_object_name)
                        versions = bucket.object_versions.filter(
                            Prefix=s3_object_name)
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s" %
                                     (version.object_key, version.version_id))
                        if config.test_ops.get("set_acl", None) is True:
                            s3_obj_acl = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "ObjectAcl",
                                "args": [bucket.name, s3_object_name],
                            })
                            # setting acl to private, just need to set to any acl and
                            # check if its set - check by response code
                            acls_set_status = s3_obj_acl.put(ACL="private")
                            response = HttpResponseParser(acls_set_status)
                            if response.status_code == 200:
                                log.info("ACLs set")
                            else:
                                raise TestExecError("Acls not Set")
                            # get obj details based on version id
                            for version in versions:
                                log.info("getting info for version id: %s" %
                                         version.version_id)
                                obj = s3lib.resource_op({
                                    "obj": rgw_conn,
                                    "resource": "Object",
                                    "args": [bucket.name, s3_object_name],
                                })
                                log.info(
                                    "obj get details: %s\n" %
                                    (obj.get(VersionId=version.version_id)))
                        if config.test_ops["copy_to_version"] is True:
                            # reverting object to one of the versions ( randomly chosen )
                            version_id_to_copy = random.choice(
                                [v.version_id for v in versions])
                            log.info("version_id_to_copy: %s" %
                                     version_id_to_copy)
                            s3_obj = rgw_conn.Object(bucket.name,
                                                     s3_object_name)
                            log.info("current version_id: %s" %
                                     s3_obj.version_id)
                            copy_response = s3_obj.copy_from(
                                CopySource={
                                    "Bucket": bucket.name,
                                    "Key": s3_object_name,
                                    "VersionId": version_id_to_copy,
                                })
                            log.info("copy_response: %s" % copy_response)
                            if copy_response is None:
                                raise TestExecError(
                                    "copy object from version id failed")
                            # current_version_id = copy_response['VersionID']
                            log.info("current_version_id: %s" %
                                     s3_obj.version_id)
                            # delete the version_id_to_copy object
                            s3_obj.delete(VersionId=version_id_to_copy)
                            log.info(
                                "all versions for the object after the copy operation: %s\n"
                                % s3_object_name)
                            for version in versions:
                                log.info(
                                    "key_name: %s --> version_id: %s" %
                                    (version.object_key, version.version_id))
                            # log.info('downloading current s3object: %s' % s3_object_name)
                            # s3_obj.download_file(s3_object_name + ".download")
                        if config.test_ops["delete_object_versions"] is True:
                            log.info("deleting s3_obj keys and its versions")
                            s3_obj = s3lib.resource_op({
                                "obj": rgw_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" %
                                     s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s" %
                                         version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj": s3_obj,
                                    "resource": "delete",
                                    "kwargs": dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(
                                        del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted ")
                                        reusable.delete_version_object(
                                            bucket,
                                            version.version_id,
                                            s3_object_path,
                                            rgw_conn,
                                            each_user,
                                        )
                                    else:
                                        raise TestExecError(
                                            "version  deletion failed")
                                else:
                                    raise TestExecError(
                                        "version deletion failed")
                            log.info("available versions for the object")
                            versions = bucket.object_versions.filter(
                                Prefix=s3_object_name)
                            for version in versions:
                                log.info(
                                    "key_name: %s --> version_id: %s" %
                                    (version.object_key, version.version_id))
                        if config.test_ops.get(
                                "delete_from_extra_user") is True:
                            log.info(
                                "trying to delete objects from extra user")
                            s3_obj = s3lib.resource_op({
                                "obj": extra_user_conn,
                                "resource": "Object",
                                "args": [bucket.name, s3_object_name],
                            })
                            log.info("deleting versions for s3 obj: %s" %
                                     s3_object_name)
                            for version in versions:
                                log.info("trying to delete obj version: %s" %
                                         version.version_id)
                                del_obj_version = s3lib.resource_op({
                                    "obj": s3_obj,
                                    "resource": "delete",
                                    "kwargs": dict(VersionId=version.version_id),
                                })
                                log.info("response:\n%s" % del_obj_version)
                                if del_obj_version is not False:
                                    response = HttpResponseParser(
                                        del_obj_version)
                                    if response.status_code == 204:
                                        log.info("version deleted ")
                                        write_key_io_info.delete_version_info(
                                            each_user["access_key"],
                                            bucket.name,
                                            s3_object_path,
                                            version.version_id,
                                        )
                                        raise TestExecError(
                                            "version and deleted, this should not happen"
                                        )
                                    else:
                                        log.info(
                                            "version did not delete, expected behaviour"
                                        )
                                else:
                                    log.info(
                                        "version did not delete, expected behaviour"
                                    )
                        if config.local_file_delete is True:
                            log.info("deleting local file")
                            utils.exec_shell_cmd("sudo rm -rf %s" %
                                                 s3_object_path)
                if config.test_ops["suspend_version"] is True:
                    log.info("suspending versioning")
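                    # Suspending versioning stops new version IDs from being created;
                    # versions that already exist in the bucket are retained.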
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None
                    })
                    response = HttpResponseParser(suspend_version_status)
                    if response.status_code == 200:
                        log.info("versioning suspended")
                        write_bucket_io_info.add_versioning_status(
                            each_user["access_key"],
                            bucket.name,
                            VERSIONING_STATUS["SUSPENDED"],
                        )
                    else:
                        raise TestExecError("version suspend failed")
                    # getting all objects in the bucket
                    log.info("getting all objects in the bucket")
                    objects = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "objects",
                        "args": None
                    })
                    log.info("objects :%s" % objects)
                    all_objects = s3lib.resource_op({
                        "obj": objects,
                        "resource": "all",
                        "args": None
                    })
                    log.info("all objects: %s" % all_objects)
                    log.info("all objects2 :%s " % bucket.objects.all())
                    for obj in all_objects:
                        log.info("object_name: %s" % obj.key)
                        versions = bucket.object_versions.filter(
                            Prefix=obj.key)
                        log.info("displaying all versions of the object")
                        for version in versions:
                            log.info("key_name: %s --> version_id: %s" %
                                     (version.object_key, version.version_id))
                if config.test_ops.get("suspend_from_extra_user") is True:
                    log.info("suspending versioning from extra user")
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')

                    bucket_versioning = s3lib.resource_op({
                        "obj":
                        extra_user_conn,
                        "resource":
                        "BucketVersioning",
                        "args": [bucket.name],
                    })

                    suspend_version_status = s3lib.resource_op({
                        "obj": bucket_versioning,
                        "resource": "suspend",
                        "args": None
                    })
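                    # The extra user does not own the bucket, so the suspend request
                    # is expected to fail; a 200 response is treated as a test failure.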
                    if suspend_version_status is not False:
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info("versioning suspended")
                            write_bucket_io_info.add_versioning_status(
                                each_user["access_key"],
                                bucket.name,
                                VERSIONING_STATUS["SUSPENDED"],
                            )
                            raise TestExecError(
                                "version suspended, this should not happen")
                    else:
                        log.info(
                            "versioning not suspended, expected behaviour")
            if config.test_ops.get("upload_after_suspend") is True:
                log.info(
                    "trying to upload after suspending versioning on bucket")
                for oc, s3_object_size in list(config.mapped_sizes.items()):
                    # non versioning upload
                    s3_object_name = s3_object_names[
                        oc] + ".after_version_suspending"
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH,
                                                  s3_object_name)
                    non_version_data_info = manage_data.io_generator(
                        s3_object_path,
                        s3_object_size,
                        op="append",
                        **{"message": "\nhello for non version\n"})
                    if non_version_data_info is False:
                        raise TestExecError("data creation failed")
                    log.info("uploading s3 object: %s" % s3_object_path)
                    upload_info = dict(
                        {
                            "access_key": each_user["access_key"],
                            "versioning_status": "suspended",
                        }, **non_version_data_info)
                    s3_obj = s3lib.resource_op({
                        "obj": bucket,
                        "resource": "Object",
                        "args": [s3_object_name],
                        "extra_info": upload_info,
                    })
                    object_uploaded_status = s3lib.resource_op({
                        "obj":
                        s3_obj,
                        "resource":
                        "upload_file",
                        "args": [non_version_data_info["name"]],
                        "extra_info":
                        upload_info,
                    })

                    if object_uploaded_status is False:
                        raise TestExecError(
                            "Resource execution failed: object upload failed")
                    if object_uploaded_status is None:
                        log.info("object uploaded")
                    s3_obj = s3lib.resource_op({
                        "obj":
                        rgw_conn,
                        "resource":
                        "Object",
                        "args": [bucket.name, s3_object_name],
                    })
                    log.info("version_id: %s" % s3_obj.version_id)
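                    # With versioning suspended, the new upload should not carry a
                    # real version ID (boto3 surfaces it as None here), which is what
                    # the check below asserts.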
                    if s3_obj.version_id is None:
                        log.info("Versions are not created after suspending")
                    else:
                        raise TestExecError(
                            "Versions are created even after suspending")
                    s3_object_download_path = os.path.join(
                        TEST_DATA_PATH, s3_object_name + ".download")
                    object_downloaded_status = s3lib.resource_op({
                        "obj":
                        bucket,
                        "resource":
                        "download_file",
                        "args": [s3_object_name, s3_object_download_path],
                    })
                    if object_downloaded_status is False:
                        raise TestExecError(
                            "Resource execution failed: object download failed"
                        )
                    if object_downloaded_status is None:
                        log.info("object downloaded")
                    # checking md5 of the downloaded file
                    s3_object_downloaded_md5 = utils.get_md5(
                        s3_object_download_path)
                    log.info("s3_object_downloaded_md5: %s" %
                             s3_object_downloaded_md5)
                    log.info("s3_object_uploaded_md5: %s" %
                             non_version_data_info["md5"])
                    if config.local_file_delete is True:
                        utils.exec_shell_cmd("sudo rm -rf %s" % s3_object_path)
            if config.test_ops.get("delete_bucket") is True:
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #6
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    if config.dbr_scenario == "brownfield":
        user_brownfield = "brownfield_user"
        all_users_info = s3lib.create_users(config.user_count, user_brownfield)
    else:
        all_users_info = s3lib.create_users(config.user_count)

    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
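        # SSE-C requests over plain HTTP are rejected by default, so the test
        # relaxes rgw_crypt_require_ssl to exercise encryption without TLS.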
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
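            # rgw_override_bucket_index_max_shards pre-shards the index of every
            # bucket created after the restart to the configured shard count.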
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
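            # Compression is configured per zone placement target; only objects
            # written after this change (and the restart below) are stored compressed.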
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=%s"
                % (zone, compression_type)
            )
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (
                        data["placement_pools"][0]["val"]["compression"]
                        == compression_type
                    ):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (
                            data["placement_pools"][0]["val"]["storage_classes"][
                                "STANDARD"
                            ]["compression_type"]
                            == compression_type
                        ):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
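            # Apply the GC-related ceph.conf overrides supplied in the test config
            # so garbage collection behaviour can be verified later via verify_gc().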
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)
        if config.dynamic_resharding is True:
            if utils.check_dbr_support():
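                # rgw_max_objs_per_shard sets how many objects a single bucket index
                # shard may hold before dynamic resharding is triggered.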
                log.info("making changes to ceph.conf")
                ceph_conf.set_to_ceph_conf(
                    "global",
                    ConfigOpts.rgw_max_objs_per_shard,
                    str(config.max_objects_per_shard),
                )
                srv_restarted = rgw_service.restart()

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                if config.bucket_sync_crash is True:
                    is_primary = utils.is_cluster_primary()
                    if is_primary:
                        bucket_name_to_create = "bkt_crash_check"
                if config.dbr_scenario == "brownfield":
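                    # The brownfield scenario reuses a fixed bucket name so successive
                    # runs keep writing into the same pre-existing bucket.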
                    bucket_name_to_create = "brownfield_bucket"

                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
                if config.dynamic_resharding is True:
                    reusable.check_sync_status()
                    op = utils.exec_shell_cmd(
                        f"radosgw-admin bucket stats --bucket {bucket.name}"
                    )
                    json_doc = json.loads(op)
                    old_num_shards = json_doc["num_shards"]
                    log.info(f"no_of_shards_created: {old_num_shards}")
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    if utils.check_dbr_support():
                        if bucket_name_to_create == "brownfield_bucket":
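                            # For an existing brownfield bucket, scale the object count
                            # from the bucket's current usage so enough new objects are
                            # written to push the bucket past the reshard threshold.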
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            if bool(json_doc["usage"]):
                                num_object = json_doc["usage"]["rgw.main"][
                                    "num_objects"
                                ]
                                config.objects_count = (
                                    num_object * 2 + config.objects_count
                                )
                                config.mapped_sizes = utils.make_mapped_sizes(config)

                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name
                            )
                            log.info(
                                "s3_object_download_path: %s" % s3_object_download_path
                            )
                            log.info(
                                "downloading to filename: %s" % s3_object_download_name
                            )
                            if (
                                config.test_ops.get("encryption_algorithm", None)
                                is not None
                            ):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s"
                                    % config.test_ops["encryption_algorithm"]
                                )
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey": encryption_key,
                                        "SSECustomerAlgorithm": config.test_ops[
                                            "encryption_algorithm"
                                        ],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op(
                                    {
                                        "obj": bucket,
                                        "resource": "download_file",
                                        "args": [
                                            s3_object_name,
                                            s3_object_download_path,
                                        ],
                                    }
                                )
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path
                            )
                            s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
                            log.info(
                                "s3_object_downloaded_md5: %s"
                                % s3_object_downloaded_md5
                            )
                            log.info(
                                "s3_object_uploaded_md5: %s" % s3_object_uploaded_md5
                            )
                            if str(s3_object_uploaded_md5) == str(
                                s3_object_downloaded_md5
                            ):
                                log.info("md5 match")
                                utils.exec_shell_cmd(
                                    "rm -rf %s" % s3_object_download_path
                                )
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    if config.bucket_sync_crash is True:
                        is_primary = utils.is_cluster_primary()
                        if is_primary is False:
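                            # On the secondary site, repeatedly run bucket sync for the
                            # crash-check bucket and look for RGW daemon crashes after
                            # each iteration.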
                            crash_info = reusable.check_for_crash()
                            if crash_info:
                                raise TestExecError("ceph daemon crash found!")
                            realm, source_zone = utils.get_realm_source_zone_info()
                            log.info(f"Realm name: {realm}")
                            log.info(f"Source zone name: {source_zone}")
                            for i in range(600):  # Running sync command for 600 times
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket sync run --bucket bkt_crash_check --rgw-curl-low-speed-time=0 --source-zone {source_zone} --rgw-realm {realm}"
                                )
                                crash_info = reusable.check_for_crash()
                                if crash_info:
                                    raise TestExecError("ceph daemon crash found!")
                                time.sleep(1)
                    if config.dynamic_resharding is True:
                        if utils.check_dbr_support():
                            reusable.check_sync_status()
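                            # Poll bucket stats once a minute for up to 10 minutes; the
                            # for/else raises only if the shard count never grows beyond
                            # the value recorded before the uploads.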
                            for i in range(10):
                                time.sleep(
                                    60
                                )  # Adding delay for processing reshard list
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket stats --bucket {bucket.name}"
                                )
                                json_doc = json.loads(op)
                                new_num_shards = json_doc["num_shards"]
                                log.info(f"no_of_shards_created: {new_num_shards}")
                                if new_num_shards > old_num_shards:
                                    break
                            else:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    if config.manual_resharding is True:
                        if utils.check_dbr_support():
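                            # Queue a manual reshard to config.shards, run the reshard
                            # process, then confirm the shard count actually increased.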
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            old_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {old_num_shards}")
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard add --bucket {bucket.name} --num-shards {config.shards}"
                            )
                            op = utils.exec_shell_cmd("radosgw-admin reshard process")
                            time.sleep(60)
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            new_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {new_num_shards}")
                            if new_num_shards <= old_num_shards:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    if config.test_datalog_trim_command is True:
                        shard_id, end_marker = reusable.get_datalog_marker()
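                        # Trim this datalog shard up to its end marker with verbose
                        # debug flags; a segmentation fault reported in the command's
                        # error output fails the test.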
                        cmd = f"sudo radosgw-admin datalog trim --shard-id {shard_id} --end-marker {end_marker} --debug_ms=1 --debug_rgw=20"
                        out, err = utils.exec_shell_cmd(cmd, debug_info=True)
                        if "Segmentation fault" in err:
                            raise TestExecError("Segmentation fault occurred")

                    # verification of shards after upload
                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name
                        )
                        out = utils.exec_shell_cmd(cmd)
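                        # Pull the bucket_id out of the metadata output and confirm
                        # that index objects for that id exist in the index pool.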
                        b_id = (
                            out.replace('"', "")
                            .strip()
                            .split(":")[1]
                            .strip()
                            .replace(",", "")
                        )
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharding verification")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        time.sleep(10)
                        reusable.check_sync_status()
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=none" % zone
            )
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #7
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=%s" %
                   (zone, compression_type))
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (data["placement_pools"][0]["val"]["compression"] ==
                            compression_type):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (data["placement_pools"][0]["val"]
                            ["storage_classes"]["STANDARD"]["compression_type"]
                                == compression_type):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" %
                                     s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info("s3_object_download_path: %s" %
                                     s3_object_download_path)
                            log.info("downloading to filename: %s" %
                                     s3_object_download_name)
                            if (config.test_ops.get("encryption_algorithm",
                                                    None) is not None):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s" %
                                    config.test_ops["encryption_algorithm"])
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey": encryption_key,
                                        "SSECustomerAlgorithm":
                                        config.test_ops["encryption_algorithm"],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    "obj":
                                    bucket,
                                    "resource":
                                    "download_file",
                                    "args": [
                                        s3_object_name,
                                        s3_object_download_path,
                                    ],
                                })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(
                                s3_object_path)
                            log.info("s3_object_downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("s3_object_uploaded_md5: %s" %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info("md5 match")
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info(
                                "deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    # verification of shards after upload
                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (out.replace(
                            '"',
                            "").strip().split(":")[1].strip().replace(",", ""))
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharding verification")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        log.info(
                            "set debug_rgw to 20 before deleting the bucket")
                        config.debug_rgw = 20
                        ceph_conf.set_to_ceph_conf("global",
                                                   ConfigOpts.debug_rgw,
                                                   str(config.debug_rgw))
                        log.info("trying to restart services")
                        srv_restarted = rgw_service.restart()
                        time.sleep(20)
                        if srv_restarted is False:
                            raise TestExecError("RGW service restart failed")
                        else:
                            log.info("RGW service restarted")
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=none" %
                   zone)
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #8
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    if config.test_ops.get("upload_type") == "multipart":
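        # get_svc_time (a helper defined outside this snippet) is sampled before and
        # after the multipart uploads; a larger value afterwards is taken to mean the
        # RGW service kept running rather than crashing and restarting.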
        srv_time_pre_op = get_svc_time()

    # create user
    tenant1 = "tenant_" + random.choice(string.ascii_letters)
    tenant1_user_info = s3lib.create_tenant_users(tenant_name=tenant1,
                                                  no_of_users_to_create=2)
    tenant1_user1_info = tenant1_user_info[0]
    tenant1_user2_info = tenant1_user_info[1]

    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant1_user2_auth = Auth(tenant1_user2_info, ssl=config.ssl)

    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant1_user2 = tenant1_user2_auth.do_auth()
    rgw_tenant1_user2_c = tenant1_user2_auth.do_auth_using_client()

    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
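    # The policy below grants tenant1_user2 only ListBucketMultiPartUploads on
    # bucket1; bucket2 gets no policy, so the same listing there must be denied.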
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant1_user2_info["user_id"]],
        actions_list=["ListBucketMultiPartUploads"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("JSON-encoded policy: %s\n" % bucket_policy)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj":
        bucket_policy_obj,
        "resource":
        "put",
        "kwargs":
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: put bucket policy failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")

    if config.test_ops.get("upload_type") == "multipart":
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            for bucket in [t1_u1_bucket1, t1_u1_bucket2]:
                s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                log.info("s3 objects to create: %s" % config.objects_count)
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    tenant1_user1_info,
                )
        srv_time_post_op = get_svc_time()
        log.info(srv_time_pre_op)
        log.info(srv_time_post_op)

        if srv_time_post_op > srv_time_pre_op:
            log.info("Service is running without crash")
        else:
            raise TestExecError("Service crashed")

    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])

    # List multipart uploads with tenant1_user2 user with bucket t1_u1_bucket1
    multipart_object1 = rgw_tenant1_user2_c.list_multipart_uploads(
        Bucket=t1_u1_bucket1.name)
    log.info("Multipart object %s" % multipart_object1)

    # Verify tenant1_user2 not having permission for listing multipart uploads in t1_u1_bucket2
    try:
        multipart_object2 = rgw_tenant1_user2_c.list_multipart_uploads(
            Bucket=t1_u1_bucket2.name)
        raise Exception(
            "%s user should not be able to list multipart uploads in bucket: %s"
            % (tenant1_user2_info["user_id"], t1_u1_bucket2.name))
    except ClientError as err:
        log.info("Listing failed as expected with exception: %s" % err)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #9
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()

    version_count = 3
    # create user
    s3_user = s3lib.create_users(1)[0]
    # authenticate
    auth = Auth(s3_user, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    b1_name = utils.gen_bucket_name_from_userid(s3_user["user_id"], rand_no=1)
    b1_k1_name = b1_name + ".key.1"  # key1
    b1_k2_name = b1_name + ".key.2"  # key2
    b2_name = utils.gen_bucket_name_from_userid(s3_user["user_id"], rand_no=2)
    b2_k1_name = b2_name + ".key.1"  # key1
    b2_k2_name = b2_name + ".key.2"  # key2
    b1 = reusable.create_bucket(b1_name, rgw_conn, s3_user)
    b2 = reusable.create_bucket(b2_name, rgw_conn, s3_user)
    # enable versioning on b1
    reusable.enable_versioning(b1, rgw_conn, s3_user, write_bucket_io_info)
    # upload object to version enabled bucket b1
    obj_sizes = list(config.mapped_sizes.values())
    config.obj_size = obj_sizes[0]
    for vc in range(version_count):
        reusable.upload_object(
            b1_k1_name,
            b1,
            TEST_DATA_PATH,
            config,
            s3_user,
            append_data=True,
            append_msg="hello vc count: %s" % str(vc),
        )
    # upload object to non version bucket b2
    config.obj_size = obj_sizes[1]
    reusable.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config, s3_user)
    # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created
    # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present
    b1_k2 = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "Object",
        "args": [b1.name, b1_k2_name]
    })
    b2_k2 = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "Object",
        "args": [b2.name, b2_k2_name]
    })
    log.info(
        "copy from b2_k1 key to b1_k2 key to bucket 1 -> version enabled bucket"
    )
    copy_response = b1_k2.copy_from(CopySource={
        "Bucket": b2.name,
        "Key": b2_k1_name,
    })
    log.info("copy_response: %s" % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info("checking if the copied object has a version id created")
    b1_k2_version_id = b1_k2.version_id
    log.info("version id: %s" % b1_k2_version_id)
    if b1_k2_version_id is None:
        raise TestExecError(
            "Version ID not created for the object copied onto the version-enabled bucket"
        )
    else:
        log.info(
            "Version ID created for the copied object on to the versioned bucket"
        )
    all_objects_in_b1 = b1.objects.all()
    log.info("all objects in bucket 1")
    for obj in all_objects_in_b1:
        log.info("object_name: %s" % obj.key)
        versions = b1.object_versions.filter(Prefix=obj.key)
        log.info("displaying all versions of the object")
        for version in versions:
            log.info("key_name: %s --> version_id: %s" %
                     (version.object_key, version.version_id))
    log.info("-------------------------------------------")
    log.info("copy from b1_k1 key to b2_k2 to bucket 2 -> non version bucket")
    copy_response = b2_k2.copy_from(CopySource={
        "Bucket": b1.name,
        "Key": b1_k1_name,
    })
    log.info("copy_response: %s" % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info("checking if the copied object has a version id created")
    b2_k2_version_id = b2_k2.version_id
    log.info("version id: %s" % b2_k2_version_id)
    if b2_k2_version_id is None:
        log.info(
            "Version ID not created for the copied object on to the non versioned bucket"
        )
    else:
        raise TestExecError(
            "Version ID created for the copied object on to the non versioned bucket"
        )
    all_objects_in_b2 = b2.objects.all()
    log.info("all objects in bucket 2")
    for obj in all_objects_in_b2:
        log.info("object_name: %s" % obj.key)
        versions = b2.object_versions.filter(Prefix=obj.key)
        log.info("displaying all versions of the object")
        for version in versions:
            log.info("key_name: %s --> version_id: %s" %
                     (version.object_key, version.version_id))

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")