def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    ceph_version = utils.exec_shell_cmd("ceph version")
    op = ceph_version.split()
    if "pacific" in op:
        commands = [
            f"ceph config set client.rgw rgw_lc_max_worker {config.rgw_lc_max_worker}",
            "ceph config set client.rgw rgw_lc_debug_interval 30",
        ]
        for command in commands:
            utils.exec_shell_cmd(command)
    if "nautilus" in op:
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_max_worker,
                                   str(config.rgw_lc_max_worker))
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    config.user_count = 1
    config.bucket_count = 1
    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"],
                                                    rand_no=1)
    obj_list = []
    obj_tag = "suffix1=WMV1"
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    prefix = [
        rule["Filter"].get("Prefix") or rule["Filter"]["And"].get("Prefix")
        for rule in config.lifecycle_conf
    ]
    prefix = prefix if prefix else ["dummy1"]
    if config.test_ops["enable_versioning"] is True:
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops["version_count"] > 0:
                    for vc in range(config.test_ops["version_count"]):
                        log.info("version count for %s is %s" %
                                 (s3_object_name, str(vc)))
                        log.info("modifying data: %s" % s3_object_name)
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            user_info,
                            append_data=True,
                            append_msg="hello object for version: %s\n" %
                            str(vc),
                        )
                else:
                    log.info("s3 objects to create: %s" % config.objects_count)
                    reusable.upload_object(s3_object_name, bucket,
                                           TEST_DATA_PATH, config, user_info)

        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops["delete_marker"] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                                   life_cycle_rule_new, config)
    if config.test_ops["enable_versioning"] is False:
        if config.test_ops["create_object"] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + "." + bucket.name + "." + str(oc)
                obj_list.append(s3_object_name)
                reusable.upload_object_with_tagging(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_conf}
        reusable.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                               life_cycle_rule, config)
        lc_ops.validate_and_rule(bucket, config)
    reusable.remove_user(user_info)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #2
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    umgmt = UserMgmt()

    host, ip = utils.get_hostname_ip()
    port = utils.get_radosgw_port_no()
    hostname = str(ip) + ":" + str(port)
    log.info(hostname)

    # preparing data
    admin_api_user = "******" + randomString()
    log.info(admin_api_user)
    user_info = umgmt.create_rest_admin_user(user_id=admin_api_user,
                                             displayname=admin_api_user)

    rgw = RGWAdmin(
        access_key=user_info["access_key"],
        secret_key=user_info["secret_key"],
        server=hostname,
        secure=False,
        verify=False,
    )

    api_user = "******" + randomString()
    log.info(api_user)
    for uc in range(config.user_count):
        # Create User
        data = rgw.create_user(uid=api_user,
                               display_name=api_user,
                               email=api_user + "@abc.xyz")
        log.info("User created successfully")
        log.info(data)
        log.info("verification starts")
        op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" %
                                  api_user)
        json_doc = json.loads(op)
        log.info(json_doc)
        v = verify_user(data, json_doc)
        if v is False:
            raise TestExecError("user verification failed after create")
        log.info("Verification for create operation completed")

        # Update User
        data = rgw.modify_user(uid=api_user,
                               display_name=api_user + "_11",
                               email=api_user + "*****@*****.**")
        log.info("User Updated successfully")
        log.info(data)
        log.info("verification starts")
        op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" %
                                  api_user)
        json_doc = json.loads(op)
        log.info(json_doc)
        v = verify_user(data, json_doc)
        if v is False:
            raise TestExecError("user verification failed after update")
        log.info("Verification for Update operation completed")

        # delete User
        data = rgw.remove_user(uid=api_user, purge_data=False)
        log.info(data)
        log.info("User removed")
        op = utils.exec_shell_cmd("radosgw-admin user list")
        json_doc = json.loads(op)
        if api_user in json_doc:
            raise TestExecError("user still listed after delete")
        log.info("Verification for Delete operation completed")
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #3
def test_exec(config):

    test_info = AddTestInfo('Bucket Request Payer')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:

        test_info.started_info()

        # create user

        all_users_info = s3lib.create_users(config.user_count,
                                            config.cluster_name)

        for each_user in all_users_info:

            # authenticate

            auth = Auth(each_user)
            rgw_conn = auth.do_auth()

            s3_object_names = []

            # create buckets

            log.info('no of buckets to create: %s' % config.bucket_count)

            for bc in range(config.bucket_count):

                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)

                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)

                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)

                bucket = resuables.create_bucket(
                    bucket_name=bucket_name_to_create,
                    rgw=rgw_conn,
                    user_info=each_user)

                bucket_request_payer = s3lib.resource_op({
                    'obj': rgw_conn,
                    'resource': 'BucketRequestPayment',
                    'args': [bucket.name]
                })

                # change the bucket request payer to 'requester'

                payer = {'Payer': 'Requester'}

                response = s3lib.resource_op({
                    'obj': bucket_request_payer,
                    'resource': 'put',
                    'kwargs': dict(RequestPaymentConfiguration=payer)
                })

                log.info(response)

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
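# Not part of the original flow: a hedged sketch of how the payer change could be
# read back with the plain boto3 client API (get_bucket_request_payment), assuming
# a client-style connection like the rgw_conn2 used in the other examples.
def verify_request_payer(s3_client, bucket_name):
    resp = s3_client.get_bucket_request_payment(Bucket=bucket_name)
    log.info("request payer for %s: %s" % (bucket_name, resp.get("Payer")))
    return resp.get("Payer") == "Requester"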
Example #4
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    # Adding caps for user1
    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    # user1 auth with iam_client
    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    # policy document
    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])
    log.info(policy_document)

    # role policy
    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")
    log.info(role_policy)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    # role creation happens here
    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    # Put role policy happening here
    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    # bucket creation operations now
    bucket_name = "testbucket" + user1["user_id"]

    # authenticating user1 for bucket creation operation
    auth = Auth(user1, ssl=config.ssl)
    user1_info = {
        "access_key": user1["access_key"],
        "secret_key": user1["secret_key"],
        "user_id": user1["user_id"],
    }
    s3_client_u1 = auth.do_auth()

    # bucket creation operation
    bucket = reusable.create_bucket(bucket_name, s3_client_u1, user1_info)

    # uploading objects to the bucket
    if config.test_ops["create_object"]:
        # uploading data
        log.info("s3 objects to create: %s" % config.objects_count)
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
            log.info("s3 object name: %s" % s3_object_name)
            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
            log.info("s3 object path: %s" % s3_object_path)
            if config.test_ops.get("upload_type") == "multipart":
                log.info("upload type: multipart")
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )
            else:
                log.info("upload type: normal")
                reusable.upload_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )
    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }
    log.info("got the credentials after assume role")

    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client = s3client.do_auth_using_client()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)

    unexisting_object = bucket_name + "_unexisting_object"
    try:
        response = s3_client.head_object(Bucket=bucket_name, Key=unexisting_object)
    except botocore.exceptions.ClientError as e:
        response_code = e.response["Error"]["Code"]
        log.info(response_code)
        if e.response["Error"]["Code"] == "404":
            log.info("404 Unexisting Object Not Found")
        elif e.response["Error"]["Code"] == "403":
            raise TestExecError("Error code : 403 - HeadObject operation: Forbidden")
Example #5
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        # create buckets
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=1)
                bucket = reusable.create_bucket(bucket_name, rgw_conn,
                                                each_user)
                if config.test_ops['enable_versioning'] is True:
                    log.info('bucket versioning test on bucket: %s' %
                             bucket.name)
                    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                    bucket_versioning = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'BucketVersioning',
                        'args': [bucket.name]
                    })
                    version_status = s3lib.resource_op({
                        'obj': bucket_versioning,
                        'resource': 'status',
                        'args': None
                    })
                    if version_status is None:
                        log.info('bucket versioning still not enabled')
                    # enabling bucket versioning
                    version_enable_status = s3lib.resource_op({
                        'obj': bucket_versioning,
                        'resource': 'enable',
                        'args': None
                    })
                    response = HttpResponseParser(version_enable_status)
                    if response.status_code == 200:
                        log.info('version enabled')
                    else:
                        raise TestExecError("version enable failed")
                if config.test_ops['create_object'] is True:
                    # upload data
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket.name, oc)
                        if config.test_ops['version_count'] > 0:
                            for vc in range(config.test_ops['version_count']):
                                log.info('version count for %s is %s' %
                                         (s3_object_name, str(vc)))
                                log.info('modifying data: %s' % s3_object_name)
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                    append_data=True,
                                    append_msg='hello object for version: %s\n'
                                    % str(vc))
                        else:
                            log.info('s3 objects to create: %s' %
                                     config.objects_count)
                            reusable.upload_object(s3_object_name, bucket,
                                                   TEST_DATA_PATH, config,
                                                   each_user)
                bucket_life_cycle = s3lib.resource_op({
                    'obj': rgw_conn,
                    'resource': 'BucketLifecycleConfiguration',
                    'args': [bucket.name]
                })
                life_cycle = basic_lifecycle_config(prefix="key",
                                                    days=20,
                                                    id="rul1")
                put_bucket_life_cycle = s3lib.resource_op({
                    "obj": bucket_life_cycle,
                    "resource": "put",
                    "kwargs": dict(LifecycleConfiguration=life_cycle)
                })
                log.info('put bucket life cycle:\n%s' % put_bucket_life_cycle)
                if put_bucket_life_cycle is False:
                    raise TestExecError(
                        "Resource execution failed: put bucket lifecycle failed")
                if put_bucket_life_cycle is not None:
                    response = HttpResponseParser(put_bucket_life_cycle)
                    if response.status_code == 200:
                        log.info('bucket life cycle added')
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                else:
                    raise TestExecError("bucket lifecycle addition failed")
                log.info('trying to retrieve bucket lifecycle config')
                get_bucket_life_cycle_config = s3lib.resource_op({
                    "obj": rgw_conn2,
                    "resource": 'get_bucket_lifecycle_configuration',
                    "kwargs": dict(Bucket=bucket.name)
                })
                if get_bucket_life_cycle_config is False:
                    raise TestExecError(
                        "bucket lifecycle config retrieval failed")
                if get_bucket_life_cycle_config is not None:
                    response = HttpResponseParser(get_bucket_life_cycle_config)
                    if response.status_code == 200:
                        log.info('bucket life cycle retrieved')
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                else:
                    raise TestExecError("bucket life cycle retrieved")
                if config.test_ops['create_object'] is True:
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(
                            bucket.name, oc)
                        if config.test_ops['version_count'] > 0:
                            if config.test_ops.get('delete_versioned_object',
                                                   None) is True:
                                log.info(
                                    'list all the versions of the object and delete the '
                                    'current version of the object')
                                log.info('all versions for the object: %s\n' %
                                         s3_object_name)
                                versions = bucket.object_versions.filter(
                                    Prefix=s3_object_name)
                                t1 = []
                                for version in versions:
                                    log.info(
                                        'key_name: %s --> version_id: %s' %
                                        (version.object_key,
                                         version.version_id))
                                    t1.append(version.version_id)
                                s3_object = s3lib.resource_op({
                                    'obj': rgw_conn,
                                    'resource': 'Object',
                                    'args': [bucket.name, s3_object_name]
                                })
                                # log.info('object version to delete: %s -> %s' % (versions[0].object_key,
                                #                                                 versions[0].version_id))
                                delete_response = s3_object.delete()
                                log.info('delete response: %s' %
                                         delete_response)
                                if delete_response['DeleteMarker'] is True:
                                    log.info(
                                        'object delete marker is set to true')
                                else:
                                    raise TestExecError(
                                        "'object delete marker is set to false"
                                    )
                                log.info(
                                    'available versions for the object after delete marker is set'
                                )
                                t2 = []
                                versions_after_delete_marker_is_set = bucket.object_versions.filter(
                                    Prefix=s3_object_name)
                                for version in versions_after_delete_marker_is_set:
                                    log.info(
                                        'key_name: %s --> version_id: %s' %
                                        (version.object_key,
                                         version.version_id))
                                    t2.append(version.version_id)
                                t2.pop()
                                if t1 == t2:
                                    log.info('versions remained intact')
                                else:
                                    raise TestExecError(
                                        'versions are not intact after delete marker is set'
                                    )
                # modify bucket lifecycle configuration, modify expiration days here for the test case.
                if config.test_ops.get('modify_lifecycle', False) is True:
                    log.info('modifying lifecycle configuration')
                    life_cycle_modifed = basic_lifecycle_config(
                        prefix="key", days=15, id="rul1", status="Disabled")
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj": bucket_life_cycle,
                        "resource": "put",
                        "kwargs": dict(LifecycleConfiguration=life_cycle_modifed)
                    })
                    log.info('put bucket life cycle:\n%s' %
                             put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)

                        if response.status_code == 200:
                            log.info('bucket life cycle added')

                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info('trying to retrieve bucket lifecycle config')
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj": rgw_conn2,
                        "resource": 'get_bucket_lifecycle_configuration',
                        "kwargs": dict(Bucket=bucket.name)
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)
                        modified_expiration_days = get_bucket_life_cycle_config[
                            'Rules'][0]['Expiration']['Days']
                        log.info('modified expiration days: %s' %
                                 modified_expiration_days)
                        if response.status_code == 200 and modified_expiration_days == 15:
                            log.info(
                                'bucket life cycle retrieved after modifying')
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after modifying"
                            )
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed after modifying"
                        )
                # disable bucket lifecycle configuration
                if config.test_ops.get('disable_lifecycle', False) is True:
                    log.info('disabling lifecycle configuration')
                    life_cycle_disabled_config = basic_lifecycle_config(
                        prefix="key", days=20, id="rul1", status="Disabled")
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj": bucket_life_cycle,
                        "resource": "put",
                        "kwargs": dict(LifecycleConfiguration=life_cycle_disabled_config)
                    })
                    log.info('put bucket life cycle:\n%s' %
                             put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info('bucket life cycle added')
                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info('trying to retrieve bucket lifecycle config')
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj": rgw_conn2,
                        "resource": 'get_bucket_lifecycle_configuration',
                        "kwargs": dict(Bucket=bucket.name)
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)
                        if response.status_code == 200 and get_bucket_life_cycle_config[
                                'Rules'][0]['Status'] == 'Disabled':
                            log.info('disabled_status: %s' %
                                     get_bucket_life_cycle_config['Rules'][0]
                                     ['Status'])
                            log.info(
                                'bucket life cycle retrieved after disabled')
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after disabled"
                            )
                    else:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed after disabled"
                        )
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #6
def test_exec(config):

    test_info = AddTestInfo('test with acls')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:

        test_info.started_info()

        s3_ops = ResourceOps()

        # create user

        config.user_count = 2

        all_users_info = s3lib.create_users(config.user_count,
                                            config.cluster_name)

        u1 = all_users_info[0]
        u2 = all_users_info[1]

        # authenticate

        u1_auth = Auth(u1)
        u1_rgw_conn = u1_auth.do_auth()

        u2_auth = Auth(u2)
        u2_rgw_conn = u2_auth.do_auth()

        no_of_buckets_to_create = 3

        u1_buckets = []
        u2_buckets = []

        for i in range(no_of_buckets_to_create):

            u1_bucket = create_bucket(u1_rgw_conn, u1, rand_no=i)
            log.info('u1_bucket_name: %s' % u1_bucket.name)

            u1_buckets.append(u1_bucket)

            u2_bucket = create_bucket(u2_rgw_conn, u2, rand_no=i)
            log.info('u2_bucket_name: %s' % u2_bucket.name)

            u2_buckets.append(u2_bucket)

        # test_acls_private(u1_rgw_conn, u1, u2, u1_buckets[0], u2_buckets[0])

        test_acls_public_write(u1_rgw_conn, u1, u2, u1_buckets[1],
                               u2_buckets[1])

        # test_acls_public_read(u1_rgw_conn, u1, u2, u1_buckets[2], u2_buckets[2])

        # print u1_bucket_info.delete()

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
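# test_acls_public_write() is not shown above; a rough sketch of the idea, assuming
# the boto3 resource API: u1 opens its bucket with a public-read-write canned ACL and
# u2 then writes an object into it. The signature matches the call above, but the
# body and checks are assumptions.
def test_acls_public_write(u1_rgw_conn, u1, u2, u1_bucket, u2_bucket):
    log.info('setting public-read-write ACL on %s' % u1_bucket.name)
    u1_bucket.Acl().put(ACL='public-read-write')
    # authenticate as the second user and attempt a cross-user write
    u2_conn = Auth(u2).do_auth()
    u2_view = u2_conn.Bucket(u1_bucket.name)
    u2_view.put_object(Key='cross_user_write_check', Body=b'written by u2')
    log.info("u2 wrote into u1's bucket with the public-read-write ACL in place")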
Example #7
def test_exec(config):
    test_info = AddTestInfo('test swift user key gen')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()

        # preparing data

        user_names = ['tuffy', 'scooby', 'max']
        tenant1 = 'tenant'

        cmd = 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s --cluster %s' \
              %(user_names[0], user_names[0], tenant1, config.cluster_name)
        out = utils.exec_shell_cmd(cmd)

        if out is False:
            raise TestExecError("RGW User creation error")

        log.info('output :%s' % out)
        v1_as_json = json.loads(out)
        log.info('created user_id: %s' % v1_as_json['user_id'])

        cmd2 = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s' \
               % (tenant1, user_names[0], user_names[0], tenant1, config.cluster_name)
        out2 = utils.exec_shell_cmd(cmd2)

        if out2 is False:
            raise TestExecError("sub-user creation error")

        v2_as_json = json.loads(out2)
        log.info('created subuser: %s' % v2_as_json['subusers'][0]['id'])

        cmd3 = 'radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret ' \
               '--cluster %s' %(user_names[0], user_names[0], tenant1, tenant1, config.cluster_name)
        out3 = utils.exec_shell_cmd(cmd3)

        if out3 is False:
            raise TestExecError("secret_key gen error")

        v3_as_json = json.loads(out3)
        log.info('created subuser: %s\nsecret_key generated: %s' %
                 (v3_as_json['swift_keys'][0]['user'],
                  v3_as_json['swift_keys'][0]['secret_key']))

        user_info = {
            'user_id': v3_as_json['swift_keys'][0]['user'],
            'key': v3_as_json['swift_keys'][0]['secret_key']
        }

        auth = Auth(user_info)

        rgw = auth.do_auth()

        for cc in range(config.container_count):

            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=cc)

            container = swiftlib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })

            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")

            for oc in range(config.objects_count):

                swift_object_name = utils.gen_s3_object_name(
                    '%s.container.%s' % (user_names[0], cc), oc)

                log.info('object name: %s' % swift_object_name)

                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)

                log.info('object path: %s' % object_path)

                object_size = utils.get_file_size(
                    config.objects_size_range['min'],
                    config.objects_size_range['max'])

                data_info = manage_data.io_generator(object_path, object_size)

                if data_info is False:
                    TestExecError("data creation failed")

                log.info('uploading object: %s' % object_path)

                with open(object_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
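# Not part of the original test: a hedged sketch of how an uploaded swift object
# could be read back for verification through the same swiftclient-style connection
# returned by Auth().do_auth() above (get_object returns the headers and the body).
def verify_swift_object(rgw, container_name, swift_object_name, object_path):
    headers, body = rgw.get_object(container_name, swift_object_name)
    with open(object_path, 'rb') as fp:
        local_data = fp.read()
    if body == local_data:
        log.info('downloaded object matches the local copy')
        return True
    log.info('content mismatch for %s' % swift_object_name)
    return False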
Example #8
def test_exec(config):
    """
    Executes test based on configuration passed
    Args:
        config(object): Test configuration
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # preparing data
    user_names = ["tom", "ram", "sam"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(
        tenant_name=tenant, user_id=user_names[1], displayname=user_names[1]
    )
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[1])
    auth = Auth(user_info, config.ssl)
    rgw = auth.do_auth()

    container_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=0)
    container = swiftlib.resource_op(
        {"obj": rgw, "resource": "put_container", "args": [container_name]}
    )
    if container is False:
        raise TestExecError("Resource execution failed: container creation faield")
    for oc, size in list(config.mapped_sizes.items()):
        # upload objects to the container
        swift_object_name = fill_container(
            rgw, container_name, user_names[1], oc, 0, size
        )
    # delete all uploaded objects via the swift bulk-delete API
    log.info("deleting all swift objects")
    auth_response = rgw.get_auth()
    token = auth_response[1]
    # test.txt file should contain container_name
    with open("test.txt", "w") as f:
        f.write(container_name)
    ip_and_port = rgw.authurl.split("/")[2]
    url = "http://{}/swift/v1/?bulk-delete".format(ip_and_port)
    test_file = open("test.txt", "r")
    headers = {
        "Accept": "application/json",
        "Content-Type": "text/plain",
        "X-Auth-Token": token,
    }
    response = requests.delete(
        url, headers=headers, files={"form_field_name": test_file}
    )
    if response.status_code == 200:
        log.info("Bulk delete succeeded")
    else:
        raise TestExecError(
            "Bulk delete failed with status code: %d" % response.status_code
        )

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #9
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get("enable_version", False):
                    log.info("enable bucket version")
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops["create_object"] is True:
                    if config.test_ops["object_structure"] == "flat":
                        # uploading data
                        log.info("top level s3 objects to create: %s" %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info("s3 object name: %s" % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info("s3 object path: %s" % s3_object_path)
                            if config.test_ops.get(
                                    "upload_type") == "multipart":
                                log.info("upload type: multipart")
                                reusable.upload_mutipart_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            else:
                                log.info("upload type: normal")
                                reusable.upload_object(
                                    s3_object_name,
                                    bucket,
                                    TEST_DATA_PATH,
                                    config,
                                    each_user,
                                )
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            # deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    "deleting local file created after the upload"
                                )
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_path)

                    # this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops["object_structure"] == "pseudo":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info("s3 object name: %s" % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info("s3 object path: %s" % s3_object_path)
                                if config.test_ops.get(
                                        "upload_type") == "multipart":
                                    log.info("upload type: multipart")
                                    reusable.upload_mutipart_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                else:
                                    log.info("upload type: normal")
                                    reusable.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                    )
                                # deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        "deleting local file created after the upload"
                                    )
                                    utils.exec_shell_cmd("rm -rf %s" %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops["create_object"] is False:
                    if config.test_ops[
                            "object_structure"] == "pseudo-dir-only":
                        log.info(
                            f"pseudo directories to create {config.pseudo_dir_count}"
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops["radoslist"] is True:
                    log.info(
                        "executing the command radosgw-admin bucket radoslist "
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter - rgw_bucket_index_max_aio
                ceph_version_id, ceph_version_name = utils.get_ceph_version()
                if ceph_version_name in ["luminous", "nautilus"]:
                    cmd = "ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.split()[1]
                else:
                    cmd = "ceph config get mon rgw_bucket_index_max_aio"
                    max_aio_output = utils.exec_shell_cmd(cmd)
                    max_aio = max_aio_output.rstrip("\n")

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json["usage"]["rgw.main"][
                    "num_objects"]

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    "measure the execution time taken to list via radosgw-admin command"
                )
                if config.test_ops["radosgw_listing_ordered"] is True:
                    log.info("ordered listing via radosgw-admin command")
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "ordered")
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops["radosgw_listing_ordered"] is False:
                    log.info("unordered listing via radosgw-admin command")
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, "unordered")
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f"with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins"
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info("measure the execution time taken to list via boto")
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f"with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins"
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        # radoslist on all buckets. BZ:https://bugzilla.redhat.com/show_bug.cgi?id=1892265
        if config.radoslist_all is True:
            log.info(
                "Executing the command radosgw-admin bucket radoslist on all buckets"
            )
            cmd = "radosgw-admin bucket radoslist | grep ERROR"
            radoslist_all_error = utils.exec_shell_cmd(cmd)
            log.info(radoslist_all_error)
            if radoslist_all_error is False:
                raise TestExecError("ERROR in radoslist command")

        if config.test_ops.get("delete_bucket_object", False):
            if config.test_ops.get("enable_version", False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)
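# time_to_list_via_boto() is referenced above but not shown; a rough sketch of the
# idea, assuming the boto3 resource connection used in this example -- time a full
# iteration over the bucket listing and return the elapsed seconds.
def time_to_list_via_boto(bucket_name, rgw_conn):
    bucket = rgw_conn.Bucket(bucket_name)
    start = time.perf_counter()
    object_count = sum(1 for _ in bucket.objects.all())
    elapsed = time.perf_counter() - start
    log.info("listed %s objects in %.4f secs" % (object_count, elapsed))
    return elapsed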
Example #10
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    objects_created_list = []

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    # making changes to max_objects_per_shard and rgw_gc_obj_min_wait to ceph.conf
    log.info('making changes to ceph.conf')
    log.info(f'rgw_max_objs_per_shard parameter set to {config.max_objects_per_shard}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard,
                               str(config.max_objects_per_shard))
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding, 'True')
    log.info(f'rgw gc obj min wait configuration parameter set to {config.rgw_gc_obj_min_wait}')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_gc_obj_min_wait,
                               str(config.rgw_gc_obj_min_wait))
    sleep_time = 10
    log.info(f'Restarting RGW service and waiting for {sleep_time} seconds')
    srv_restarted = rgw_service.restart()
    time.sleep(sleep_time)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                log.info(f'creating {str(bc)} bucket')
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create, rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user, write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('enable_version', False):
                            log.info('upload versioned objects')
                            reusable.upload_version_object(config, each_user, rgw_conn, s3_object_name,
                                                           config.obj_size, bucket, TEST_DATA_PATH)
                        else:
                            log.info('upload type: normal')
                            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                        objects_created_list.append((s3_object_name, s3_object_path))
                # deleting the local file created after upload
                if config.local_file_delete is True:
                    log.info('deleting local file created after the upload')
                    utils.exec_shell_cmd('rm -rf %s' % s3_object_path)

                # listing the objects
                if config.test_ops.get('list_objects', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            reusable.list_versioned_objects(bucket, name, path, rgw_conn)
                    else:
                        reusable.list_objects(bucket)
                
                if config.test_ops.get('delete_bucket_object', False):
                    if config.test_ops.get('enable_version', False):
                        for name, path in objects_created_list:
                            log.info('name: %s, path: %s' % (name, path))
                            versions = bucket.object_versions.filter(Prefix=name)
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, name]})
                            log.info('deleting versions for s3 obj: %s' % name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        reusable.delete_version_object(bucket, version.version_id, path, rgw_conn, each_user)
                                    else:
                                        raise TestExecError("version deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                    else:
                        reusable.delete_objects(bucket)
                    log.info(f'deleting the bucket {bucket_name_to_create}')
                    reusable.delete_bucket(bucket)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    
    # remove the user
    reusable.remove_user(each_user)
Example #11
def test_exec(config):
    test_info = AddTestInfo('test bucket policy')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        # preparing data
        user_names = ['user1', 'user2', 'user3']
        Bucket_names = ['bucket1', 'bucket2', 'bucket3']
        object_names = ['o1', 'o2']
        tenant1 = 'tenant1'
        tenant2 = 'tenant2'
        t1_u1_info = create_tenant_user(tenant_name=tenant1,
                                        user_id=user_names[0])
        t1_u1_auth = Auth(t1_u1_info)
        t1_u1 = t1_u1_auth.do_auth()
        t2_u1_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[0])
        t2_u1_auth = Auth(t2_u1_info)
        t2_u1 = t2_u1_auth.do_auth()
        t1_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0],
                                           rgw=t1_u1,
                                           user_info=t1_u1_info)
        t2_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0],
                                           rgw=t2_u1,
                                           user_info=t2_u1_info)
        obj_sizes = list(config.mapped_sizes.values())
        config.obj_size = obj_sizes[0]
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t1_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t1_u1_info)
        config.obj_size = obj_sizes[1]
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t2_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t2_u1_info)
        t2_u2_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[1])
        t2_u2_auth = Auth(t2_u2_info)
        t2_u2 = t2_u2_auth.do_auth()
        # will try to access the bucket and objects in both tenants
        # access t1_u1_b1
        log.info('trying to access tenant1->user1->bucket1')
        t1_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [Bucket_names[0]]
        })
        log.info(
            'trying to download tenant1->user1->bucket1->object1 from tenant2->user2'
        )
        download_path1 = TEST_DATA_PATH + "/t1_u1_b1_%s.download" % object_names[0]
        t1_u1_b1_o1_download = s3lib.resource_op({
            'obj':
            t1_u1_b1_from_t2_u2,
            'resource':
            'download_file',
            'args': [object_names[0], download_path1]
        })
        if t1_u1_b1_o1_download is False:
            log.info('object not downloaded\n')
        if t1_u1_b1_o1_download is None:
            raise TestExecError(
                "object downloaded for tenant1->user1->bucket1->object1, this should not happen"
            )

        log.info(
            'trying to access tenant2->user1->bucket1 from user2 in tenant 2')

        t2_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [Bucket_names[0]]
        })
        log.info(
            'trying to download tenant2->user1->bucket1->object1 from tenant2->user2'
        )
        download_path2 = TEST_DATA_PATH + "/t2_u1_b1_%s.download" % object_names[0]
        t2_u1_b1_o1_download = s3lib.resource_op({
            'obj':
            t2_u1_b1_from_t2_u2,
            'resource':
            'download_file',
            'args': [object_names[0], download_path2]
        })
        if t2_u1_b1_o1_download is False:
            log.info('object did not download, worked as expected')
        if t2_u1_b1_o1_download is None:
            raise TestExecError(
                'object downloaded\n'
                'downloaded tenant2->user1->bucket1->object1, this should not happen'
            )
        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #12
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops['sharding']['enable'] is True:
            log.info('enabling sharding on buckets')
            max_shards = config.test_ops['sharding']['max_shards']
            log.info('making changes to ceph.conf')
            ceph_conf.set_to_ceph_conf(
                'global', ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards))
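            # rgw_override_bucket_index_max_shards pre-shards the index of
            # every bucket created after the restart into max_shards shards.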
            log.info('trying to restart services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        if config.test_ops['compression']['enable'] is True:
            compression_type = config.test_ops['compression']['type']
            log.info('enabling compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=%s' % (zone, compression_type)
            out = utils.exec_shell_cmd(cmd)
            try:
                data = json.loads(out)
                if data['placement_pools'][0]['val']['storage_classes'][
                        'STANDARD']['compression_type'] == compression_type:
                    log.info('Compression enabled successfully')
                else:
                    raise ValueError('failed to enable compression')
            except ValueError as e:
                exit(str(e))
            log.info('trying to restart rgw services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        # create buckets
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                bucket = resuables.create_bucket(bucket_name_to_create,
                                                 rgw_conn, each_user)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('upload_type') == 'multipart':
                            log.info('upload type: multipart')
                            resuables.upload_mutipart_object(
                                s3_object_name, bucket, TEST_DATA_PATH, config,
                                each_user)
                        else:
                            log.info('upload type: normal')
                            resuables.upload_object(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    each_user)
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' %
                                     s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' %
                                     s3_object_download_path)
                            log.info('downloading to filename: %s' %
                                     s3_object_download_name)
                            if config.test_ops.get('encryption_algorithm',
                                                   None) is not None:
                                log.info('encryption download')
                                log.info(
                                    'encryption algorithm: %s' %
                                    config.test_ops['encryption_algorithm'])
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        'SSECustomerKey':
                                        encryption_key,
                                        'SSECustomerAlgorithm':
                                        config.test_ops['encryption_algorithm']
                                    })
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    'obj':
                                    bucket,
                                    'resource':
                                    'download_file',
                                    'args':
                                    [s3_object_name, s3_object_download_path],
                                })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(
                                s3_object_path)
                            log.info('s3_object_downloaded_md5: %s' %
                                     s3_object_downloaded_md5)
                            log.info('s3_object_uploaded_md5: %s' %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info('md5 match')
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError('md5 mismatch')
                        if config.local_file_delete is True:
                            log.info(
                                'deleting local file created after the upload')
                            utils.exec_shell_cmd('rm -rf %s' % s3_object_path)
                    # verification of shards after upload
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s | grep bucket_id' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace('"', '').strip().split(":")[1].strip().replace(',', '')
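                        # index objects in the index pool are named
                        # '.dir.<bucket-id>[.<shard-num>]', so grepping for the
                        # bucket id should list one entry per index shard.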
                        cmd2 = 'rados -p default.rgw.buckets.index ls | grep %s' % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info('got output from sharding verification')
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops['compression']['enable'] is True:
                        cmd = 'radosgw-admin bucket stats --bucket=%s' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
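                        # hedged sketch (assumption, not part of the original test): the
                        # comment above suggests verifying compression by comparing
                        # size_kb_utilized with size_kb_actual; assuming the usual
                        # 'radosgw-admin bucket stats' JSON layout, that check could be:
                        if out:
                            bucket_stats = json.loads(out)
                            bucket_usage = bucket_stats.get('usage', {}).get('rgw.main', {})
                            if bucket_usage.get('size_kb_utilized', 0) < bucket_usage.get('size_kb_actual', 0):
                                log.info('compression appears effective: utilized size is below actual size')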
                    if config.test_ops['delete_bucket_object'] is True:
                        log.info('listing all objects in bucket: %s' %
                                 bucket.name)
                        objects = s3lib.resource_op({
                            'obj': bucket,
                            'resource': 'objects',
                            'args': None
                        })
                        log.info('objects :%s' % objects)
                        all_objects = s3lib.resource_op({
                            'obj': objects,
                            'resource': 'all',
                            'args': None
                        })
                        log.info('all objects: %s' % all_objects)
                        for obj in all_objects:
                            log.info('object_name: %s' % obj.key)
                        log.info('deleting all objects in bucket')
                        objects_deleted = s3lib.resource_op({
                            'obj': objects,
                            'resource': 'delete',
                            'args': None
                        })
                        log.info('objects_deleted: %s' % objects_deleted)
                        if objects_deleted is False:
                            raise TestExecError(
                                'Resource execution failed: Object deletion failed'
                            )
                        if objects_deleted is not None:
                            response = HttpResponseParser(objects_deleted[0])
                            if response.status_code == 200:
                                log.info('objects deleted ')
                            else:
                                raise TestExecError("objects deletion failed")
                        else:
                            raise TestExecError("objects deletion failed")
                        log.info('deleting bucket: %s' % bucket.name)
                        # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                        bucket_deleted_status = s3lib.resource_op({
                            'obj': bucket,
                            'resource': 'delete',
                            'args': None
                        })
                        log.info('bucket_deleted_status: %s' %
                                 bucket_deleted_status)
                        if bucket_deleted_status is not None:
                            response = HttpResponseParser(
                                bucket_deleted_status)
                            if response.status_code == 204:
                                log.info('bucket deleted ')
                            else:
                                raise TestExecError("bucket deletion failed")
                        else:
                            raise TestExecError("bucket deletion failed")
        # disable compression after test
        if config.test_ops['compression']['enable'] is True:
            log.info('disable compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=none' % zone
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
Example #13
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    test_info = AddTestInfo('create m buckets')
    conf_path = '/etc/ceph/%s.conf' % config.cluster_name
    ceph_conf = CephConfOp(conf_path)
    rgw_service = RGWService()

    try:

        test_info.started_info()

        # get user

        with open('user_details') as fout:
            all_users_info = simplejson.load(fout)

        for each_user in all_users_info:

            user_info = basic_io_structure.user(**{'user_id': each_user['user_id'],
                                                   'access_key': each_user['access_key'],
                                                   'secret_key': each_user['secret_key']})

            write_user_info.add_user_info(user_info)

        for each_user in all_users_info:

            # authenticate

            auth = Auth(each_user)
            rgw_conn = auth.do_auth()

            # enabling sharding
            if config.test_ops['sharding']['enable'] is True:
                log.info('enabling sharding on buckets')
                max_shards = config.test_ops['sharding']['max_shards']
                log.info('making changes to ceph.conf')
                ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_override_bucket_index_max_shards,
                                           max_shards)
                log.info('trying to restart services ')
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')

            # create buckets

            if config.test_ops['create_bucket'] is True:

                log.info('no of buckets to create: %s' % config.bucket_count)

                for bc in range(config.bucket_count):

                    bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)

                    log.info('creating bucket with name: %s' % bucket_name_to_create)

                    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)

                    bucket = s3lib.resource_op({'obj': rgw_conn,
                                                'resource': 'Bucket',
                                                'args': [bucket_name_to_create]})

                    created = s3lib.resource_op({'obj': bucket,
                                                'resource': 'create',
                                                'args': None,
                                                'extra_info': {'access_key': each_user['access_key']}})

                    if created is False:
                        raise TestExecError("Resource execution failed: bucket creation failed")

                    if created is not None:

                        response = HttpResponseParser(created)

                        if response.status_code == 200:
                           log.info('bucket created')

                        else:
                            raise TestExecError("bucket creation failed")

                    else:
                        raise TestExecError("bucket creation failed")

                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s --cluster %s | grep bucket_id' \
                              % (bucket.name, config.cluster_name)

                        out = utils.exec_shell_cmd(cmd)

                        b_id = out.replace('"', '').strip().split(":")[1].strip().replace(',', '')

                        cmd2 = 'rados -p default.rgw.buckets.index ls --cluster %s | grep %s' \
                               % (config.cluster_name, b_id)

                        out = utils.exec_shell_cmd(cmd2)

                        log.info('got output from sharding verification')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 1
    config.rgw_lifecycle_work_time = "00:00-23:59"
    log.info("making changes to ceph.conf")
    ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    log.info("trying to restart services")
    srv_restarted = rgw_service.restart()
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    rgw_service.status()
    # create user
    user_info = s3lib.create_users(config.user_count)
    for each_user in user_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        if config.test_ops["create_bucket"]:
            log.info("no of buckets to create: %s" % config.bucket_count)
            # create bucket
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=1)
                bucket = reusable.create_bucket(bucket_name, rgw_conn,
                                                each_user)
                life_cycle_rule = {"Rules": config.lifecycle_conf}
                reusable.put_bucket_lifecycle(bucket, rgw_conn, rgw_conn2,
                                              life_cycle_rule)
                if config.test_ops["create_object"]:
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        log.info(
                            f"s3 objects to create of size {config.obj_size}")
                        s3_object_name = config.lifecycle_conf[0]["Filter"][
                            "Prefix"] + str(oc)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info(
                            f"s3 object path: {s3_object_path}, name: {s3_object_name}"
                        )
                        reusable.upload_mutipart_object(
                            s3_object_name, bucket, TEST_DATA_PATH, config,
                            each_user)
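                # the loop below polls bucket stats until lifecycle expiration
                # empties the bucket; its for/else raises only if num_objects
                # never drops to zero within the retry window.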

                for i in range(1, 100):
                    time.sleep(60)
                    bucket_details = json.loads(
                        utils.exec_shell_cmd(
                            f"radosgw-admin bucket stats --bucket={bucket.name}"
                        ))
                    if bucket_details["usage"]["rgw.main"]["num_objects"] == 0:
                        break
                else:
                    raise TestExecError(
                        "Bucket object expiration taking longer than expected")

                gc_list_output = json.loads(
                    utils.exec_shell_cmd(
                        "radosgw-admin gc list --include-all"))
                if gc_list_output:
                    log.info("Removing shadow objects found")
                    utils.exec_shell_cmd(
                        "radosgw-admin gc process --include-all")

                bucket_id = (bucket_details["id"] + "_" +
                             config.lifecycle_conf[0]["Filter"]["Prefix"])
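                # head objects in the data pool are typically named
                # '<bucket-id/marker>_<object-name>', so grepping the data pool
                # for this prefix should surface anything that lifecycle
                # expiration and GC failed to remove.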
                log.info(
                    f"check for all the entry {bucket_id} for the bucket in data pool"
                )
                obj_pool = utils.exec_shell_cmd(
                    f"rados ls -p default.rgw.buckets.data | grep {bucket_id}")
                if obj_pool:
                    for obj in obj_pool.splitlines():
                        object_name = obj.split("_")[-1]
                        log.info(f"s3 object name to download: {object_name}")
                        object_name_downloaded = object_name + "." + "download"
                        object_download_path = os.path.join(
                            TEST_DATA_PATH, object_name_downloaded)
                        object_downloaded_status = s3lib.resource_op({
                            "obj":
                            bucket,
                            "resource":
                            "download_file",
                            "args": [object_name, object_download_path],
                        })
                        if object_downloaded_status is False:
                            log.info("as expected, the object is not downloadable")
                        if object_downloaded_status is None:
                            raise TestExecError(
                                "object not listed in the bucket but still downloadable"
                            )

                if config.local_file_delete:
                    log.info("deleting local file created after the upload")
                    utils.exec_shell_cmd(f"rm -rf {TEST_DATA_PATH}")

                reusable.delete_bucket(bucket)
        reusable.remove_user(each_user)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    test_info = AddTestInfo('test versioning with objects')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count, config.cluster_name)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            s3_object_names = []
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = s3lib.resource_op({'obj': rgw_conn,
                                            'resource': 'Bucket',
                                            'args': [bucket_name_to_create]})
                # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
                created = s3lib.resource_op({'obj': bucket,
                                             'resource': 'create',
                                             'args': None,
                                             'extra_info': {'access_key': each_user['access_key']}})
                if created is False:
                    raise TestExecError("Resource execution failed: bucket creation failed")
                if created is not None:
                    response = HttpResponseParser(created)
                    if response.status_code == 200:
                        log.info('bucket created')
                    else:
                        raise TestExecError("bucket creation failed")
                else:
                    raise TestExecError("bucket creation failed")
                # getting bucket version object
                if config.test_ops['enable_version'] is True:
                    log.info('bucket versioning test on bucket: %s' % bucket.name)
                    # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                    bucket_versioning = s3lib.resource_op({'obj': rgw_conn,
                                                           'resource': 'BucketVersioning',
                                                           'args': [bucket.name]})
                    # checking the versioning status
                    # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                    version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                        'resource': 'status',
                                                        'args': None
                                                        })
                    if version_status is None:
                        log.info('bucket versioning still not enabled')
                    # enabling bucket versioning
                    # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                    version_enable_status = s3lib.resource_op({'obj': bucket_versioning,
                                                               'resource': 'enable',
                                                               'args': None})
                    response = HttpResponseParser(version_enable_status)
                    if response.status_code == 200:
                        log.info('version enabled')
                    else:
                        raise TestExecError("version enable failed")
                    if config.objects_count > 0:

                        log.info('s3 objects to create: %s' % config.objects_count)
                        for oc in range(config.objects_count):
                            # versioning upload
                            s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, str(oc))
                            s3_object_names.append(s3_object_name)
                            log.info('s3 object name: %s' % s3_object_name)
                            log.info('versioning count: %s' % config.version_count)
                            s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                                                 config.objects_size_range['max'])
                            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                            original_data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                            if original_data_info is False:
                                raise TestExecError("data creation failed")
                            for vc in range(config.version_count):
                                log.info('version count for %s is %s' % (s3_object_name, str(vc)))
                                log.info('modifying data: %s' % s3_object_name)
                                modified_data_info = manage_data.io_generator(s3_object_path, s3_object_size,
                                                                              data='append',
                                                                              **{
                                                                                  'message': '\nhello object for version: %s\n' % str(
                                                                                      vc)})
                                if modified_data_info is False:
                                    raise TestExecError("data modification failed")
                                log.info('uploading s3 object: %s' % s3_object_path)
                                upload_info = dict({'access_key': each_user['access_key']}, **modified_data_info)
                                object_uploaded_status = s3lib.resource_op({'obj': bucket,
                                                                            'resource': 'upload_file',
                                                                            'args': [modified_data_info['name'],
                                                                                     s3_object_name],
                                                                            'extra_info': upload_info})
                                if object_uploaded_status is False:
                                    raise TestExecError("Resource execution failed: object upload failed")
                                if object_uploaded_status is None:
                                    log.info('object uploaded')
                            log.info('all versions for the object: %s\n' % s3_object_name)
                            versions = bucket.object_versions.filter(Prefix=s3_object_name)
                            for version in versions:
                                log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                            if config.test_ops['copy_to_version'] is True:
                                # reverting object to one of the versions ( randomly chosen )
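                                # copy_from with a VersionId in CopySource writes a new
                                # current version whose content comes from the chosen older
                                # version, which is the standard S3 way to restore one.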
                                version_id_to_copy = random.choice([v.version_id for v in versions])
                                log.info('version_id_to_copy: %s' % version_id_to_copy)
                                s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                                log.info('current version_id: %s' % s3_obj.version_id)
                                copy_response = s3_obj.copy_from(CopySource={'Bucket': bucket.name,
                                                                             'Key': s3_object_name,
                                                                             'VersionId': version_id_to_copy})
                                log.info('copy_response: %s' % copy_response)
                                if copy_response is None:
                                    raise TestExecError("copy object from version id failed")
                                # current_version_id = copy_response['VersionID']
                                log.info('current_version_id: %s' % s3_obj.version_id)
                                # delete the version_id_to_copy object
                                s3_obj.delete(VersionId=version_id_to_copy)
                                log.info('all versions for the object after the copy operation: %s\n' % s3_object_name)
                                for version in versions:
                                    log.info(
                                        'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                                # log.info('downloading current s3object: %s' % s3_object_name)
                                # s3_obj.download_file(s3_object_name + ".download")
                            if config.test_ops['delete_object_versions'] is True:
                                log.info('deleting s3_obj keys and its versions')
                                s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                            'resource': 'Object',
                                                            'args': [bucket.name, s3_object_name]})
                                log.info('deleting versions for s3 obj: %s' % s3_object_name)
                                for version in versions:
                                    log.info('trying to delete obj version: %s' % version.version_id)
                                    del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                         'resource': 'delete',
                                                                         'kwargs': dict(VersionId=version.version_id)})
                                    log.info('response:\n%s' % del_obj_version)
                                    if del_obj_version is not None:
                                        response = HttpResponseParser(del_obj_version)
                                        if response.status_code == 204:
                                            log.info('version deleted ')
                                        else:
                                            raise TestExecError("version deletion failed")
                                    else:
                                        raise TestExecError("version deletion failed")
                    if config.test_ops['suspend_version'] is True:
                        # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                        suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                    'resource': 'suspend',
                                                                    'args': None})
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info('versioning suspended')
                        else:
                            raise TestExecError("version suspend failed")
                if config.test_ops['upload_after_suspend'] is True:

                    log.info('trying to upload after suspending versioning on bucket')
                    for s3_object_name in s3_object_names:
                        # non versioning upload
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_size = utils.get_file_size(config.objects_size_range['min'],
                                                             config.objects_size_range['max'])
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        non_version_data_info = manage_data.io_generator(s3_object_path, s3_object_size, op="append",
                                                                         **{
                                                                             'message': '\nhello object for non version\n'})
                        if non_version_data_info is False:
                            raise TestExecError("data creation failed")
                        log.info('uploading s3 object: %s' % s3_object_path)
                        upload_info = dict({'access_key': each_user['access_key']}, **non_version_data_info)
                        object_uploaded_status = s3lib.resource_op({'obj': bucket,
                                                                    'resource': 'upload_file',
                                                                    'args': [non_version_data_info['name'],
                                                                             s3_object_name],
                                                                    'extra_info': upload_info})
                        if object_uploaded_status is False:
                            raise TestExecError("Resource execution failed: object upload failed")
                        if object_uploaded_status is None:
                            log.info('object uploaded')
                        s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download")
                        object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                      'resource': 'download_file',
                                                                      'args': [s3_object_name,
                                                                               s3_object_download_path],
                                                                      })
                        if object_downloaded_status is False:
                            raise TestExecError("Resource execution failed: object download failed")
                        if object_downloaded_status is None:
                            log.info('object downloaded')
                        # checking md5 of the downloaded file
                        s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                        log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                        log.info('s3_object_uploaded_md5: %s' % non_version_data_info['md5'])

        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #16
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    log.info("starting IO")
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info("sharding configuration will be added now.")
    if config.sharding_type == "dynamic":
        log.info("sharding type is dynamic")
        # for dynamic sharding, the number of shards should be greater than
        # (number of objects) / (max objects per shard)
        # example: objects = 500, max objects per shard = 10
        # then the number of shards should be at least 50
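        # worked example (values are illustrative assumptions): with 505 objects
        # and 10 objects per shard the expectation rounds up to
        # math.ceil(505 / 10) == 51; the check further below keeps the plain
        # division used here.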
        time.sleep(15)
        log.info("making changes to ceph.conf")
        ceph_conf.set_to_ceph_conf(
            "global",
            ConfigOpts.rgw_max_objs_per_shard,
            str(config.max_objects_per_shard),
        )
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_dynamic_resharding, "True")
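        # with rgw_dynamic_resharding enabled, RGW reshards a bucket index
        # automatically once its per-shard object count exceeds
        # rgw_max_objs_per_shard.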
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info("num_shards_expected: %s" % num_shards_expected)
        log.info("trying to restart services ")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")

    config.bucket_count = 1
    objects_created_list = []
    log.info("no of buckets to create: %s" % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=1)
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    if config.test_ops.get("enable_version", False):
        log.info("enable bucket version")
        reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info)
    log.info("s3 objects to create: %s" % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
        if config.test_ops.get("enable_version", False):
            reusable.upload_version_object(
                config,
                user_info,
                rgw_conn,
                s3_object_name,
                config.obj_size,
                bucket,
                TEST_DATA_PATH,
            )
        else:
            reusable.upload_object(
                s3_object_name, bucket, TEST_DATA_PATH, config, user_info
            )
        objects_created_list.append((s3_object_name, s3_object_path))

    if config.sharding_type == "manual":
        log.info("sharding type is manual")
        # for manual.
        # the number of shards will be the value set in the command.
        time.sleep(15)
        log.info("in manual sharding")
        cmd_exec = utils.exec_shell_cmd(
            "radosgw-admin bucket reshard --bucket=%s --num-shards=%s "
            "--yes-i-really-mean-it" % (bucket.name, config.shards)
        )
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    sleep_time = 600
    log.info(f"verification starts after waiting for {sleep_time} seconds")
    time.sleep(sleep_time)
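    # the num_shards recorded in the bucket.instance metadata reflects the
    # index layout after (re)sharding, so it is read back and compared below.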
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket.name)
    json_doc = json.loads(op)
    bucket_id = json_doc["data"]["bucket"]["bucket_id"]
    op2 = utils.exec_shell_cmd(
        "radosgw-admin metadata get bucket.instance:%s:%s" % (bucket.name, bucket_id)
    )
    json_doc2 = json.loads(op2)
    num_shards_created = json_doc2["data"]["bucket_info"]["num_shards"]
    log.info("no_of_shards_created: %s" % num_shards_created)
    if config.sharding_type == "manual":
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info("Expected number of shards created")
    if config.sharding_type == "dynamic":
        log.info("Verify if resharding list is empty")
        reshard_list_op = json.loads(utils.exec_shell_cmd("radosgw-admin reshard list"))
        if not reshard_list_op:
            log.info(
                "for dynamic number of shards created should be greater than or equal to number of expected shards"
            )
            log.info("no_of_shards_expected: %s" % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info("Expected number of shards created")
            else:
                raise TestExecError("Expected number of shards not created")
        else:
            raise TestExecError("Expected number of shards not created")

    if config.test_ops.get("delete_bucket_object", False):
        if config.test_ops.get("enable_version", False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn, user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl, "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=%s"
                % (zone, compression_type)
            )
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (
                        data["placement_pools"][0]["val"]["compression"]
                        == compression_type
                    ):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (
                            data["placement_pools"][0]["val"]["storage_classes"][
                                "STANDARD"
                            ]["compression_type"]
                            == compression_type
                        ):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                log.info("creating bucket with name: %s" % bucket_name_to_create)
                bucket = reusable.create_bucket(
                    bucket_name_to_create, rgw_conn, each_user
                )
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc
                        )
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" % s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name
                            )
                            log.info(
                                "s3_object_download_path: %s" % s3_object_download_path
                            )
                            log.info(
                                "downloading to filename: %s" % s3_object_download_name
                            )
                            if (
                                config.test_ops.get("encryption_algorithm", None)
                                is not None
                            ):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s"
                                    % config.test_ops["encryption_algorithm"]
                                )
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey": encryption_key,
                                        "SSECustomerAlgorithm": config.test_ops[
                                            "encryption_algorithm"
                                        ],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op(
                                    {
                                        "obj": bucket,
                                        "resource": "download_file",
                                        "args": [
                                            s3_object_name,
                                            s3_object_download_path,
                                        ],
                                    }
                                )
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path
                            )
                            s3_object_uploaded_md5 = utils.get_md5(s3_object_path)
                            log.info(
                                "s3_object_downloaded_md5: %s"
                                % s3_object_downloaded_md5
                            )
                            log.info(
                                "s3_object_uploaded_md5: %s" % s3_object_uploaded_md5
                            )
                            if str(s3_object_uploaded_md5) == str(
                                s3_object_downloaded_md5
                            ):
                                log.info("md5 match")
                                utils.exec_shell_cmd(
                                    "rm -rf %s" % s3_object_download_path
                                )
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info("deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
                    # verification of shards after upload
                    if config.test_datalog_trim_command is True:
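                        # trim a datalog shard with debug logging enabled and make sure radosgw-admin does not segfault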
                        cmd = "sudo radosgw-admin datalog trim --shard-id 117 --end-marker 1_1626169668.769402_510233116.1 --debug_ms=1 --debug_rgw=20"
                        out, err = utils.exec_shell_cmd(cmd, debug_info=True)
                        if "Segmentation fault" in err:
                            log.info("Segmentation fault occured")
                            test_info.failed_status("test failed")
                            sys.exit(1)

                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name
                        )
                        out = utils.exec_shell_cmd(cmd)
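                        # pull the bucket_id out of the metadata output, then list the index pool to confirm shard objects exist for it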
                        b_id = (
                            out.replace('"', "")
                            .strip()
                            .split(":")[1]
                            .strip()
                            .replace(",", "")
                        )
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharing verification.--------")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        time.sleep(10)
                        reusable.check_sync_status()
                        reusable.delete_bucket(bucket)
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = (
                "radosgw-admin zone placement modify --rgw-zone=%s "
                "--placement-id=default-placement --compression=none" % zone
            )
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")

        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    test_info = AddTestInfo(
        'create m buckets with n objects with bucket life cycle')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:

        test_info.started_info()

        # create user

        all_users_info = s3lib.create_users(config.user_count,
                                            config.cluster_name)

        for each_user in all_users_info:

            # authenticate

            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()

            # create buckets

            if config.test_ops['create_bucket'] is True:

                log.info('no of buckets to create: %s' % config.bucket_count)

                for bc in range(config.bucket_count):

                    bucket_name = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=1)
                    bucket = resuables.create_bucket(bucket_name, rgw_conn,
                                                     each_user)

                    if config.test_ops['create_object'] is True:

                        # uploading data

                        log.info('s3 objects to create: %s' %
                                 config.objects_count)

                        for oc in range(config.objects_count):
                            s3_object_name = utils.gen_s3_object_name(
                                bucket.name, oc)

                            resuables.upload_object(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    each_user)

                    bucket_life_cycle = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'BucketLifecycleConfiguration',
                        'args': [bucket.name]
                    })

                    life_cycle = basic_lifecycle_config(prefix="key",
                                                        days=20,
                                                        id="rul1")

                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj":
                        bucket_life_cycle,
                        "resource":
                        "put",
                        "kwargs":
                        dict(LifecycleConfiguration=life_cycle)
                    })

                    log.info('put bucket life cycle:\n%s' %
                             put_bucket_life_cycle)

                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: bucket creation faield"
                        )

                    if put_bucket_life_cycle is not None:

                        response = HttpResponseParser(put_bucket_life_cycle)

                        if response.status_code == 200:
                            log.info('bucket life cycle added')

                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")

                    else:
                        raise TestExecError("bucket lifecycle addition failed")

                    log.info('trying to retrieve bucket lifecycle config')

                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj":
                        rgw_conn2,
                        "resource":
                        'get_bucket_lifecycle_configuration',
                        "kwargs":
                        dict(Bucket=bucket.name)
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")

                    if get_bucket_life_cycle_config is not None:

                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)

                        if response.status_code == 200:
                            log.info('bucket life cycle retrieved')

                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")

                    else:
                        raise TestExecError("bucket life cycle retrieved")

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    log.info(type(ceph_conf))
    rgw_service = RGWService()
    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(
        tenant_name=tenant, user_id=user_names[0], displayname=user_names[0]
    )
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    auth = Auth(user_info, config.ssl)
    rgw = auth.do_auth()

    for cc in range(config.container_count):
        if config.version_enable is True:
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global", ConfigOpts.rgw_swift_versioning_enabled, "True"
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
            container_name_old = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=str(cc) + "old"
            )
            log.info(container_name_old)
            container = swiftlib.resource_op(
                {
                    "obj": rgw,
                    "resource": "put_container",
                    "kwargs": dict(container=container_name_old),
                }
            )
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=str(cc) + "new"
            )
            log.info(container_name)
            container = swiftlib.resource_op(
                {
                    "obj": rgw,
                    "resource": "put_container",
                    "args": [
                        container_name,
                        {"X-Versions-Location": container_name_old},
                    ],
                }
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed"
                )
            ls = []
            swift_object_name = ""
            for version_count in range(config.version_count):
                for oc, size in list(config.mapped_sizes.items()):
                    swift_object_name = fill_container(
                        rgw, container_name, user_names[0], oc, cc, size
                    )
                ls = rgw.get_container(container_name_old)
                ls = list(ls)
            if config.copy_version_object is True:
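                # get_container() returns (headers, object_listing); pick one of the
                # archived versions from the old container to copy back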
                old_obj_name = ls[1][config.version_count - 2]["name"]
                log.info(old_obj_name)
                container = swiftlib.resource_op(
                    {
                        "obj": rgw,
                        "resource": "copy_object",
                        "kwargs": dict(
                            container=container_name_old,
                            obj=old_obj_name,
                            destination=container_name + "/" + swift_object_name,
                        ),
                    }
                )
                if container is False:
                    raise TestExecError("Resource execution failed")
                log.info("Successfully copied item")
            else:
                current_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format(
                    uid=user_names[0], tenant=tenant, bucket=container_name
                )
                num_obj_current = utils.exec_shell_cmd(current_count)
                num_obj_current = json.loads(num_obj_current)
                num_obj_current = (
                    num_obj_current[0].get("usage").get("rgw.main").get("num_objects")
                )
                old_count = "radosgw-admin bucket stats --uid={uid} --tenant={tenant} --bucket='{bucket}' ".format(
                    uid=user_names[0], tenant=tenant, bucket=container_name_old
                )
                num_obj_old = utils.exec_shell_cmd(old_count)
                num_obj_old = json.loads(num_obj_old)
                num_obj_old = (
                    num_obj_old[0].get("usage").get("rgw.main").get("num_objects")
                )
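                # expected archived copies in the old container: every object was uploaded
                # version_count times, and the latest copy of each stays in the new container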
                version_count_from_config = (
                    config.objects_count * config.version_count
                ) - config.objects_count
                if (num_obj_current == config.objects_count) and (
                    num_obj_old == version_count_from_config
                ):
                    log.info("objects and versioned obbjects are correct")
                else:
                    test_info.failed_status("test failed")

        elif config.object_expire is True:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc
            )
            container = swiftlib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed"
                )
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(
                    rgw,
                    container_name,
                    user_names[0],
                    oc,
                    cc,
                    size,
                    header={"X-Delete-After": 5},
                )
                time.sleep(7)
                container_exists = swiftlib.resource_op(
                    {
                        "obj": rgw,
                        "resource": "get_object",
                        "args": [container_name, swift_object_name],
                    }
                )
                log.info(container_exists)
                if container_exists:
                    msg = "test failed as the objects are still present"
                    test_info.failed_status(msg)
                    raise TestExecError(msg)

        elif config.large_object_upload is True:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc
            )
            container = swiftlib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed"
                )
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(
                    rgw,
                    container_name,
                    user_names[0],
                    oc,
                    cc,
                    size,
                    multipart=True,
                    split_size=config.split_size,
                )
                container_name_new = utils.gen_bucket_name_from_userid(
                    user_info["user_id"], rand_no=str(cc) + "New"
                )
                container = swiftlib.resource_op(
                    {
                        "obj": rgw,
                        "resource": "put_container",
                        "kwargs": dict(container=container_name_new),
                    }
                )
                if container is False:
                    raise TestExecError(
                        "Resource execution failed: container creation failed"
                    )
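                # create an empty manifest object in the new container; the X-Object-Manifest
                # header makes swift serve the segments stored under
                # '<container_name>/<swift_object_name>/' as one large object (DLO)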
                container = swiftlib.resource_op(
                    {
                        "obj": rgw,
                        "resource": "put_object",
                        "kwargs": dict(
                            container=container_name_new,
                            obj=swift_object_name,
                            contents=None,
                            headers={
                                "X-Object-Manifest": container_name
                                + "/"
                                + swift_object_name
                                + "/"
                            },
                        ),
                    }
                )
                if container is False:
                    raise TestExecError(
                        "Resource execution failed: container creation failed"
                    )
                if config.large_object_download is True:
                    swift_old_object_path = os.path.join(
                        TEST_DATA_PATH, swift_object_name
                    )
                    swift_object_download_fname = swift_object_name + ".download"
                    log.info("download object name: %s" % swift_object_download_fname)
                    swift_object_download_path = os.path.join(
                        TEST_DATA_PATH, swift_object_download_fname
                    )
                    log.info("download object path: %s" % swift_object_download_path)
                    swift_object_downloaded = rgw.get_object(
                        container_name_new, swift_object_name
                    )
                    with open(swift_object_download_path, "wb") as fp:
                        fp.write(swift_object_downloaded[1])
                    old_object = utils.get_md5(swift_old_object_path)
                    downloaded_obj = utils.get_md5(swift_object_download_path)
                    log.info("s3_object_downloaded_md5: %s" % old_object)
                    log.info("s3_object_uploaded_md5: %s" % downloaded_obj)
                    if str(old_object) == str(downloaded_obj):
                        log.info("md5 match")
                        utils.exec_shell_cmd("rm -rf %s" % swift_object_download_path)
                    else:
                        raise TestExecError("md5 mismatch")

        else:
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc
            )
            container = swiftlib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation failed"
                )
            for oc, size in list(config.mapped_sizes.items()):
                swift_object_name = fill_container(
                    rgw, container_name, user_names[0], oc, cc, size
                )
                # download object
                swift_object_download_fname = swift_object_name + ".download"
                log.info("download object name: %s" % swift_object_download_fname)
                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname
                )
                log.info("download object path: %s" % swift_object_download_path)
                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name
                )
                with open(swift_object_download_path, "w") as fp:
                    fp.write(str(swift_object_downloaded[1]))
                # modify and re-upload
                log.info("appending new message to test_data")
                message_to_append = "adding new msg after download"
                fp = open(swift_object_download_path, "a+")
                fp.write(message_to_append)
                fp.close()
                with open(swift_object_download_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
                # delete object
                log.info("deleting swift object")
                rgw.delete_object(container_name, swift_object_name)
            # delete container
            log.info("deleting swift container")
            rgw.delete_container(container_name)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    reusable.remove_user(tenant_user_info, tenant=tenant)
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()

    version_count = 3
    # create user
    s3_user = s3lib.create_users(1)[0]
    # authenticate
    auth = Auth(s3_user, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    b1_name = 'bucky.1e'  # bucket 1
    b1_k1_name = b1_name + ".key.1"  # key1
    b1_k2_name = b1_name + ".key.2"  # key2
    b2_name = 'bucky.2e'  # bucket 2
    b2_k1_name = b2_name + ".key.1"  # key1
    b2_k2_name = b2_name + ".key.2"  # key2
    b1 = resuables.create_bucket(b1_name, rgw_conn, s3_user)
    b2 = resuables.create_bucket(b2_name, rgw_conn, s3_user)
    # enable versioning on b1
    resuables.enable_versioning(b1, rgw_conn, s3_user, write_bucket_io_info)
    # upload object to version enabled bucket b1
    obj_sizes = list(config.mapped_sizes.values())
    config.obj_size = obj_sizes[0]
    for vc in range(version_count):
        resuables.upload_object(b1_k1_name, b1, TEST_DATA_PATH, config, s3_user, append_data=True,
                                append_msg='hello vc count: %s' % str(vc))
    # upload object to non version bucket b2
    config.obj_size = obj_sizes[1]
    resuables.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config, s3_user)
    # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created
    # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present
    b1_k2 = s3lib.resource_op({'obj': rgw_conn,
                               'resource': 'Object',
                               'args': [b1.name, b1_k2_name]})
    b2_k2 = s3lib.resource_op({'obj': rgw_conn,
                               'resource': 'Object',
                               'args': [b2.name, b2_k2_name]})
    log.info('copy from b2_k1 key to b1_k2 key to bucket 1 -> version enabled bucket')
    copy_response = b1_k2.copy_from(CopySource={'Bucket': b2.name,
                                                'Key': b2_k1_name, })
    log.info('copy_response: %s' % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info('checking if copied object has version id created')
    b1_k2_version_id = b1_k2.version_id
    log.info('version id: %s' % b1_k2_version_id)
    if b1_k2_version_id is None:
        raise TestExecError('Version ID not created for the copied object on to the versioned enabled bucket')
    else:
        log.info('Version ID created for the copied object on to the versioned bucket')
    all_objects_in_b1 = b1.objects.all()
    log.info('all objects in bucket 1')
    for obj in all_objects_in_b1:
        log.info('object_name: %s' % obj.key)
        versions = b1.object_versions.filter(Prefix=obj.key)
        log.info('displaying all versions of the object')
        for version in versions:
            log.info(
                'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
    log.info('-------------------------------------------')
    log.info('copy from b1_k1 key to b2_k2 to bucket 2 -> non version bucket')
    copy_response = b2_k2.copy_from(CopySource={'Bucket': b1.name,
                                                'Key': b1_k1_name, })
    log.info('copy_response: %s' % copy_response)
    if copy_response is None:
        raise TestExecError("copy object failed")
    log.info('checking if copied object has version id created')
    b2_k2_version_id = b2_k2.version_id
    log.info('version id: %s' % b2_k2_version_id)
    if b2_k2_version_id is None:
        log.info('Version ID not created for the copied object on to the non versioned bucket')
    else:
        raise TestExecError('Version ID created for the copied object on to the non versioned bucket')
    all_objects_in_b2 = b2.objects.all()
    log.info('all objects in bucket 2')
    for obj in all_objects_in_b2:
        log.info('object_name: %s' % obj.key)
        versions = b2.object_versions.filter(Prefix=obj.key)
        log.info('displaying all versions of the object')
        for version in versions:
            log.info(
                'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_sts_key,
                                     session_encryption_token)
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts,
                                     True)
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1)
    iam_client = auth.do_auth_iam_client()
    """
    TODO:
    policy_document and role_policy can be used valid dict types.
    need to explore on this. 
    """

    policy_document = (
        '{"Version":"2012-10-17",'
        '"Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/%s"]},'
        '"Action":["sts:AssumeRole"]}]}' % (user2["user_id"]))

    role_policy = ('{"Version":"2012-10-17",'
                   '"Statement":{"Effect":"Allow",'
                   '"Action":"s3:*",'
                   '"Resource":"arn:aws:s3:::*"}}')

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.
        format(user_id=user1["user_id"]))
    utils.exec_shell_cmd(add_caps_cmd)

    # log.info(policy_document)
    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        })
    write_user_info.add_user_info(user_info)

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            bucket = reusable.create_bucket(bucket_name_to_create,
                                            s3_client_rgw,
                                            assumed_role_user_info)
            if config.test_ops["create_object"] is True:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH,
                                                  s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
def test_exec(config):
    test_info = AddTestInfo("Bucket Request Payer")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            s3_object_names = []
            # create buckets
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = resuables.create_bucket(
                    bucket_name=bucket_name_to_create,
                    rgw=rgw_conn,
                    user_info=each_user)
                bucket_request_payer = s3lib.resource_op({
                    "obj": rgw_conn,
                    "resource": "BucketRequestPayment",
                    "args": [bucket.name],
                })
                # change the bucket request payer to 'requester'
                payer = {"Payer": "Requester"}
                response = s3lib.resource_op({
                    "obj":
                    bucket_request_payer,
                    "resource":
                    "put",
                    "kwargs":
                    dict(RequestPaymentConfiguration=payer),
                })
                log.info(response)
                if response is not None:
                    response = HttpResponseParser(response)
                    if response.status_code == 200:
                        log.info("bucket created")
                    else:
                        raise TestExecError(
                            "bucket request payer modification failed")
                else:
                    raise TestExecError(
                        "bucket request payer modification failed")
                payer = bucket_request_payer.payer
                log.info("bucket request payer: %s" % payer)
                if payer != "Requester":
                    raise TestExecError(
                        "Request payer is not set or changed properly")
                log.info("s3 objects to create: %s" % config.objects_count)
                if config.objects_count is not None:
                    log.info("objects size range:\n%s" %
                             config.objects_size_range)
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(
                            bucket.name, oc)
                        resuables.upload_object(s3_object_name, bucket,
                                                TEST_DATA_PATH, config,
                                                each_user)
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo('RGW Dynamic Resharding test')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    try:
        test_info.started_info()
        log.info('starting IO')
        config.max_objects_per_shard = 10
        config.no_of_shards = 10
        config.user_count = 1
        user_info = s3lib.create_users(config.user_count)
        user_info = user_info[0]
        auth = Auth(user_info)
        rgw_conn = auth.do_auth()
        config.bucket_count = 1
        log.info('no of buckets to create: %s' % config.bucket_count)
        bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1)
        bucket = create_bucket_with_versioning(rgw_conn, user_info, bucket_name)
        upload_objects(user_info, bucket, config)
        log.info('sharding configuration will be added now.')
        if config.sharding_type == 'online':
            log.info('sharding type is online')
            # for online,
            # the number of shards  should be greater than   [ (no of objects)/(max objects per shard) ]
            # example: objects = 500 ; max object per shard = 10
            # then no of shards should be at least 50 or more
            time.sleep(15)
            log.info('making changes to ceph.conf')
            ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, config.max_objects_per_shard)
            ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding,
                                       True)
            num_shards_expected = config.objects_count / config.max_objects_per_shard
            log.info('num_shards_expected: %s' % num_shards_expected)
            log.info('trying to restart services ')
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        if config.sharding_type == 'offline':
            log.info('sharding type is offline')
            # for offline.
            # the number of shards will be the value set in the command.
            time.sleep(15)
            log.info('in offline sharding')
            cmd_exec = utils.exec_shell_cmd('radosgw-admin bucket reshard --bucket=%s --num-shards=%s'
                                            % (bucket.name, config.no_of_shards))
            if cmd_exec is False:
                raise TestExecError("offline resharding command execution failed")
        # upload_objects(user_info, bucket, config)
        log.info('s3 objects to create: %s' % config.objects_count)
        for oc in range(config.objects_count):
            s3_object_name = utils.gen_s3_object_name(bucket.name, config.objects_count + oc)
            resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)
        time.sleep(300)
        log.info('verification starts')
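        # fetch bucket_id from the bucket metadata, then read num_shards from the
        # corresponding bucket.instance record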
        op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket.name)
        json_doc = json.loads(op)
        bucket_id = json_doc['data']['bucket']['bucket_id']
        op2 = utils.exec_shell_cmd("radosgw-admin metadata get bucket.instance:%s:%s" % (bucket.name, bucket_id))
        json_doc2 = json.loads(op2)
        num_shards_created = json_doc2['data']['bucket_info']['num_shards']
        log.info('no_of_shards_created: %s' % num_shards_created)
        log.info('no_of_shards_expected: %s' % num_shards_expected)
        if config.sharding_type == 'offline':
            if num_shards_expected != num_shards_created:
                raise TestExecError("expected number of shards not created")
            log.info('Expected number of shards created')
        if config.sharding_type == 'online':
            log.info('for online, '
                     'number of shards created should be greater than or equal to number of  expected shards')
            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')
            else:
                raise TestExecError('Expected number of shards not created')
        read_io = ReadIOInfo()
        read_io.yaml_fname = 'io_info.yaml'
        read_io.verify_io()
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
import v2.lib.manage_data as manage_data
import v2.lib.resource_op as s3lib
import v2.utils.utils as utils
from v2.lib.exceptions import DefaultDatalogBackingError, MFAVersionError, TestExecError
from v2.lib.rgw_config_opts import ConfigOpts
from v2.lib.s3.write_io_info import (
    BasicIOInfoStructure,
    BucketIoInfo,
    IOInfoInitialize,
    KeyIoInfo,
)
from v2.lib.sync_status import sync_status
from v2.utils.utils import HttpResponseParser, RGWService

io_info_initialize = IOInfoInitialize()
basic_io_structure = BasicIOInfoStructure()
write_bucket_io_info = BucketIoInfo()
write_key_io_info = KeyIoInfo()
rgw_service = RGWService()

log = logging.getLogger()


def create_bucket(bucket_name, rgw, user_info):
    log.info("creating bucket with name: %s" % bucket_name)
    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    bucket = s3lib.resource_op({
        "obj": rgw,
        "resource": "Bucket",
        "args": [bucket_name],
    })
    # assumed minimal continuation: create the bucket and hand back the handle
    created = s3lib.resource_op({"obj": bucket, "resource": "create", "args": None})
    if created is False:
        raise TestExecError("Resource execution failed: bucket creation failed")
    return bucket
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    if config.dbr_scenario == "brownfield":
        user_brownfiled = "brownfield_user"
        all_users_info = s3lib.create_users(config.user_count, user_brownfiled)
    else:
        all_users_info = s3lib.create_users(config.user_count)

    if config.test_ops.get("encryption_algorithm", None) is not None:
        log.info("encryption enabled, making ceph config changes")
        ceph_conf.set_to_ceph_conf("global", ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info("RGW service restarted")
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops["sharding"]["enable"] is True:
            log.info("enabling sharding on buckets")
            max_shards = config.test_ops["sharding"]["max_shards"]
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards),
            )
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.test_ops["compression"]["enable"] is True:
            compression_type = config.test_ops["compression"]["type"]
            log.info("enabling compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=%s" %
                   (zone, compression_type))
            out = utils.exec_shell_cmd(cmd)
            ceph_version = utils.exec_shell_cmd("ceph version").split()[4]
            try:
                data = json.loads(out)
                if ceph_version == "luminous":
                    if (data["placement_pools"][0]["val"]["compression"] ==
                            compression_type):
                        log.info("Compression enabled successfully")

                else:
                    if ceph_version in ["nautilus", "octopus"]:
                        if (data["placement_pools"][0]["val"]
                            ["storage_classes"]["STANDARD"]["compression_type"]
                                == compression_type):
                            log.info("Compression enabled successfully")
            except ValueError as e:
                exit(str(e))
            log.info("trying to restart rgw services ")
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            conf = config.ceph_conf
            reusable.set_gc_conf(ceph_conf, conf)
        if config.dynamic_resharding is True:
            if utils.check_dbr_support():
                log.info("making changes to ceph.conf")
                ceph_conf.set_to_ceph_conf(
                    "global",
                    ConfigOpts.rgw_max_objs_per_shard,
                    str(config.max_objects_per_shard),
                )
                srv_restarted = rgw_service.restart()

        # create buckets
        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                if config.bucket_sync_crash is True:
                    is_primary = utils.is_cluster_primary()
                    if is_primary:
                        bucket_name_to_create = "bkt-crash-check"
                if config.dbr_scenario == "brownfield":
                    bucket_name_to_create = ("brownfield-dynamic-bkt"
                                             if config.dynamic_resharding else
                                             "brownfield-manual-bkt")

                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.dynamic_resharding is True:
                    reusable.check_sync_status()
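                    # record the shard count before the uploads so growth after dynamic resharding can be verified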
                    op = utils.exec_shell_cmd(
                        f"radosgw-admin bucket stats --bucket {bucket.name}")
                    json_doc = json.loads(op)
                    old_num_shards = json_doc["num_shards"]
                    log.info(f"no_of_shards_created: {old_num_shards}")
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    if utils.check_dbr_support():
                        if bucket_name_to_create in [
                                "brownfield-dynamic-bkt",
                                "brownfield-manual-bkt",
                        ]:
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            if bool(json_doc["usage"]):
                                num_object = json_doc["usage"]["rgw.main"][
                                    "num_objects"]
                                config.objects_count = (num_object * 2 +
                                                        config.objects_count)
                                config.mapped_sizes = utils.make_mapped_sizes(
                                    config)

                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        if config.test_ops.get("upload_type") == "multipart":
                            log.info("upload type: multipart")
                            reusable.upload_mutipart_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        else:
                            log.info("upload type: normal")
                            reusable.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                        if config.test_ops["download_object"] is True:
                            log.info("trying to download object: %s" %
                                     s3_object_name)
                            s3_object_download_name = s3_object_name + "." + "download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info("s3_object_download_path: %s" %
                                     s3_object_download_path)
                            log.info("downloading to filename: %s" %
                                     s3_object_download_name)
                            if (config.test_ops.get("encryption_algorithm",
                                                    None) is not None):
                                log.info("encryption download")
                                log.info(
                                    "encryption algorithm: %s" %
                                    config.test_ops["encryption_algorithm"])
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        "SSECustomerKey":
                                        encryption_key,
                                        "SSECustomerAlgorithm":
                                        config.
                                        test_ops["encryption_algorithm"],
                                    },
                                )
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    "obj":
                                    bucket,
                                    "resource":
                                    "download_file",
                                    "args": [
                                        s3_object_name,
                                        s3_object_download_path,
                                    ],
                                })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info("object downloaded")
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(
                                s3_object_path)
                            log.info("s3_object_downloaded_md5: %s" %
                                     s3_object_downloaded_md5)
                            log.info("s3_object_uploaded_md5: %s" %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info("md5 match")
                                utils.exec_shell_cmd("rm -rf %s" %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError("md5 mismatch")
                        if config.local_file_delete is True:
                            log.info(
                                "deleting local file created after the upload")
                            utils.exec_shell_cmd("rm -rf %s" % s3_object_path)
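                    # queue a reshard for the bucket, then cancel it and
                    # verify the bucket leaves the reshard queue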
                    if config.reshard_cancel_cmd is True:
                        op = utils.exec_shell_cmd(
                            f"radosgw-admin reshard add --bucket {bucket.name} --num-shards 29"
                        )
                        op = utils.exec_shell_cmd(
                            f"radosgw-admin reshard list")
                        if bucket.name in op:
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard cancel --bucket {bucket.name}"
                            )
                            cancel_op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard list")
                            if bucket.name in cancel_op:
                                raise TestExecError(
                                    "bucket is still in reshard queue")
                        else:
                            raise TestExecError(
                                "Command failed....Bucket is not added into reshard queue"
                            )
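                    # on the secondary site, run bucket sync repeatedly and
                    # verify that no ceph daemon crash is reported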
                    if config.bucket_sync_crash is True:
                        is_primary = utils.is_cluster_primary()
                        if is_primary is False:
                            crash_info = reusable.check_for_crash()
                            if crash_info:
                                raise TestExecError("ceph daemon crash found!")
                            realm, source_zone = utils.get_realm_source_zone_info(
                            )
                            log.info(f"Realm name: {realm}")
                            log.info(f"Source zone name: {source_zone}")
                            for i in range(600):  # run bucket sync 600 times
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket sync run --bucket bkt-crash-check --rgw-curl-low-speed-time=0 --source-zone {source_zone} --rgw-realm {realm}"
                                )
                                crash_info = reusable.check_for_crash()
                                if crash_info:
                                    raise TestExecError(
                                        "ceph daemon crash found!")
                                time.sleep(1)
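                    # wait for dynamic resharding to increase num_shards
                    # beyond the previously recorded old_num_shards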
                    if config.dynamic_resharding is True:
                        if utils.check_dbr_support():
                            reusable.check_sync_status()
                            for i in range(10):
                                # allow time to process the reshard list
                                time.sleep(60)
                                op = utils.exec_shell_cmd(
                                    f"radosgw-admin bucket stats --bucket {bucket.name}"
                                )
                                json_doc = json.loads(op)
                                new_num_shards = json_doc["num_shards"]
                                log.info(
                                    f"no_of_shards_created: {new_num_shards}")
                                if new_num_shards > old_num_shards:
                                    break
                            else:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
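                    # reshard the bucket manually via 'reshard add' and
                    # 'reshard process', then verify num_shards increased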
                    if config.manual_resharding is True:
                        if utils.check_dbr_support():
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            old_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {old_num_shards}")
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin reshard add --bucket {bucket.name} --num-shards {config.shards}"
                            )
                            op = utils.exec_shell_cmd(
                                "radosgw-admin reshard process")
                            time.sleep(60)
                            op = utils.exec_shell_cmd(
                                f"radosgw-admin bucket stats --bucket {bucket.name}"
                            )
                            json_doc = json.loads(op)
                            new_num_shards = json_doc["num_shards"]
                            log.info(f"no_of_shards_created: {new_num_shards}")
                            if new_num_shards <= old_num_shards:
                                raise TestExecError(
                                    "num shards are same after processing resharding"
                                )
                    # trim the datalog shard and check for a segfault
                    if config.test_datalog_trim_command is True:
                        shard_id, end_marker = reusable.get_datalog_marker()
                        cmd = f"sudo radosgw-admin datalog trim --shard-id {shard_id} --end-marker {end_marker} --debug_ms=1 --debug_rgw=20"
                        out, err = utils.exec_shell_cmd(cmd, debug_info=True)
                        if "Segmentation fault" in err:
                            raise TestExecError("Segmentation fault occured")

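                    # verification of shards after upload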
                    if config.test_ops["sharding"]["enable"] is True:
                        cmd = (
                            "radosgw-admin metadata get bucket:%s | grep bucket_id"
                            % bucket.name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = (out.replace(
                            '"',
                            "").strip().split(":")[1].strip().replace(",", ""))
                        cmd2 = "rados -p default.rgw.buckets.index ls | grep %s" % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info("got output from sharding verification")
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops["compression"]["enable"] is True:
                        cmd = "radosgw-admin bucket stats --bucket=%s" % bucket.name
                        out = utils.exec_shell_cmd(cmd)
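                    # clean up: delete objects, check sync, delete the bucket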
                    if config.test_ops["delete_bucket_object"] is True:
                        reusable.delete_objects(bucket)
                        time.sleep(10)
                        reusable.check_sync_status()
                        reusable.delete_bucket(bucket)
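        # user administration checks via radosgw-admin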
        if config.modify_user:
            user_id = each_user["user_id"]
            new_display_name = each_user["user_id"] + each_user["user_id"]
            cmd = f"radosgw-admin user modify --uid='{user_id}' --display-name='{new_display_name}'"
            out = utils.exec_shell_cmd(cmd)
            out = json.loads(out)
            if new_display_name == out["display_name"]:
                log.info("User modified successfully")
            else:
                raise TestExecError("Failed to modify user")
        if config.suspend_user:
            user_id = each_user["user_id"]
            cmd = f"radosgw-admin user suspend --uid='{user_id}'"
            out = utils.exec_shell_cmd(cmd)
            out = json.loads(out)
            if out["suspended"] == 1:
                log.info("User got suspended")
            else:
                raise TestExecError("Failed to suspend user")
        if config.enable_user:
            user_id = each_user["user_id"]
            cmd = f"radosgw-admin user enable --uid='{user_id}'"
            out = utils.exec_shell_cmd(cmd)
            out = json.loads(out)
            if out["suspended"] == 0:
                log.info("User enabled successfully")
            else:
                raise TestExecError("Failed to enable user")
        if config.delete_user:
            user_id = each_user["user_id"]
            cmd = f"radosgw-admin user rm --uid='{user_id}'"
            out = utils.exec_shell_cmd(cmd)
            cmd = f"radosgw-admin user list"
            out = utils.exec_shell_cmd(cmd)
            if user_id not in out:
                log.info("User removed successfully")
            else:
                raise TestExecError("Failed to remove user")
        # disable compression after test
        if config.test_ops["compression"]["enable"] is True:
            log.info("disable compression")
            cmd = "radosgw-admin zone get"
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = ("radosgw-admin zone placement modify --rgw-zone=%s "
                   "--placement-id=default-placement --compression=none" %
                   zone)
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.gc_verification is True:
            final_op = reusable.verify_gc()
            if final_op != -1:
                test_info.failed_status("test failed")
                sys.exit(1)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")


def test_exec(config):
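    """Test bucket policy operations across tenants.

    Creates one user each under two tenants, creates buckets for the first
    tenant's user, applies a bucket policy granting the second tenant's user
    access to the first tenant's bucket, optionally uploads multipart objects
    while comparing service times, and then exercises modify, replace and
    delete of the bucket policy.
    """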

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    if config.test_ops.get("upload_type") == "multipart":
        srv_time_pre_op = get_svc_time()

    # create user
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    for each_user in tenant1_user_info:
        tenant1_user1_information = each_user
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj":
        bucket_policy_obj,
        "resource":
        "put",
        "kwargs":
        dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket creation faield")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")

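    # upload multipart objects and compare service times before and after
    # to confirm the RGW service did not crash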
    if config.test_ops.get("upload_type") == "multipart":
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            s3_object_name = utils.gen_s3_object_name(t1_u1_bucket1.name, oc)
            log.info("s3 objects to create: %s" % config.objects_count)
            reusable.upload_mutipart_object(
                s3_object_name,
                t1_u1_bucket1,
                TEST_DATA_PATH,
                config,
                tenant1_user1_information,
            )
        srv_time_post_op = get_svc_time()
        log.info("service time before upload: %s" % srv_time_pre_op)
        log.info("service time after upload: %s" % srv_time_post_op)

        if srv_time_post_op > srv_time_pre_op:
            log.info("Service is running without crash")
        else:
            raise TestExecError("Service crashed")

    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying buckey policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj":
            bucket_policy_obj,
            "resource":
            "put",
            "kwargs":
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0][
            "Action"]
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info("cleaned_actions_list_from_modified_policy: %s" %
                 cleaned_actions_list_from_modified_policy)
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj":
            bucket_policy_obj,
            "resource":
            "put",
            "kwargs":
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("new bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")


def test_exec(config):
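    """Test bucket and object operations on an indexless placement target.

    Adds an indexless placement to the default zonegroup and zone, makes it
    the default placement, creates buckets and objects, verifies that the
    bucket index_type is Indexless, and finally reverts the placement
    changes.
    """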

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    log.info(
        "adding indexless placement to placement target of default zonegroup")
    zonegroup_set = utils.exec_shell_cmd(
        'radosgw-admin zonegroup placement add --rgw-zonegroup="default" --placement-id="indexless-placement"'
    )

    log.info("adding indexless placement to placement pool of default zone")
    zone_set = utils.exec_shell_cmd(
        'radosgw-admin zone placement add --rgw-zone="default" --placement-id="indexless-placement" --data-pool="default.rgw.buckets.data" --index-pool="default.rgw.buckets.index" --data_extra_pool="default.rgw.buckets.non-ec" --placement-index-type="indexless"'
    )

    log.info("making indexless-placement as default")
    indexless_default = utils.exec_shell_cmd(
        'radosgw-admin zonegroup placement default --placement-id="indexless-placement"'
    )

    log.info("restart the rgw daemons")
    restart_service = rgw_service.restart()
    if restart_service is False:
        raise TestExecError("RGW service restart failed")
    log.info("sleep for 20 seconds after RGW service restart")
    time.sleep(20)

    # perform s3 operations
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{"signature_version": "s3v4"})
        else:
            rgw_conn = auth.do_auth()

        if config.test_ops["create_bucket"] is True:
            log.info("no of buckets to create: %s" % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc)
                log.info("creating bucket with name: %s" %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops["create_object"] is True:
                    # uploading data
                    log.info("s3 objects to create: %s" % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info("s3 object name: %s" % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info("s3 object path: %s" % s3_object_path)
                        log.info("upload type: normal")
                        reusable.upload_object(s3_object_name, bucket,
                                               TEST_DATA_PATH, config,
                                               each_user)

        # verify the bucket created has index_type = Indexless
        log.info("verify the bucket created has index_type as Indexless")
        bucket_stats = utils.exec_shell_cmd(
            "radosgw-admin bucket stats --bucket %s" % bucket_name_to_create)
        bucket_stats_json = json.loads(bucket_stats)
        bkt_index_type = bucket_stats_json["index_type"]
        if bkt_index_type == "Indexless":
            log.info(f"index_type is Indexless for bucket %s" %
                     bucket_name_to_create)
        else:
            raise TestExecError(" index_type is not Indexless as expected")

        # delete bucket and objects
        if config.test_ops["delete_bucket"] is True:
            log.info("Deleting buckets and objects")
            reusable.delete_bucket(bucket)

    # reverting to default placement group
    log.info("revert changes to zone, zonegroup and default placement target")
    zone_set = utils.exec_shell_cmd(
        'radosgw-admin zone placement rm --rgw-zone="default" --placement-id="indexless-placement" '
    )
    if "indexless" in zone_set:
        raise TestExecError(
            "Indexless placement present in zone even after revert")
    zonegroup_set = utils.exec_shell_cmd(
        'radosgw-admin zonegroup placement rm --rgw-zonegroup="default" --placement-id="indexless-placement"'
    )
    if "indexless" in zonegroup_set:
        raise TestExecError(
            "Indexless placement present in zonegroup even after revert")
    default_placement = utils.exec_shell_cmd(
        'radosgw-admin zonegroup placement default --placement-id="default-placement"'
    )

    log.info("restart the rgw daemons")
    restart_service = rgw_service.restart()
    if restart_service is False:
        raise TestExecError("RGW service restart failed")
    log.info("sleep for 20 seconds after RGW service restart")
    time.sleep(20)

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")


def test_exec(config):
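    """Test bucket listing with flat and pseudo-directory object layouts.

    Uploads objects in a flat or pseudo-directory structure (optionally
    versioned), runs radosgw-admin bucket radoslist, and measures the time
    taken for ordered/unordered listing via radosgw-admin and via boto
    against the configured rgw_bucket_index_max_aio value.
    """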

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        objects_created_list = []
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                bucket = reusable.create_bucket(bucket_name_to_create,
                                                rgw_conn, each_user)
                if config.test_ops.get('enable_version', False):
                    log.info('enable bucket version')
                    reusable.enable_versioning(bucket, rgw_conn, each_user,
                                               write_bucket_io_info)
                if config.test_ops['create_object'] is True:
                    if config.test_ops['object_structure'] == 'flat':
                        # uploading data
                        log.info('top level s3 objects to create: %s' %
                                 config.objects_count)
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info('s3 object name: %s' % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info('s3 object path: %s' % s3_object_path)
                            if config.test_ops.get(
                                    'upload_type') == 'multipart':
                                log.info('upload type: multipart')
                                reusable.upload_mutipart_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                            else:
                                log.info('upload type: normal')
                                reusable.upload_object(s3_object_name, bucket,
                                                       TEST_DATA_PATH, config,
                                                       each_user)
                            objects_created_list.append(
                                (s3_object_name, s3_object_path))
                            #deleting the local file created after upload
                            if config.local_file_delete is True:
                                log.info(
                                    'deleting local file created after the upload'
                                )
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_path)

                    #this covers listing of a bucket with pseudo directories and objects in it ; Unable to list contents of large buckets https://bugzilla.redhat.com/show_bug.cgi?id=1874645#c72
                    if config.test_ops['object_structure'] == 'pseudo':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count} with {config.objects_count} objects in each'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_pseudo_dir_name)
                            manage_data.pseudo_dir_generator(s3_object_path)
                            for oc, size in list(config.mapped_sizes.items()):
                                config.obj_size = size
                                s3_object_name = utils.gen_s3_pseudo_object_name(
                                    s3_pseudo_dir_name, oc)
                                log.info('s3 object name: %s' % s3_object_name)
                                s3_object_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_name)
                                log.info('s3 object path: %s' % s3_object_path)
                                if config.test_ops.get(
                                        'upload_type') == 'multipart':
                                    log.info('upload type: multipart')
                                    reusable.upload_mutipart_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                else:
                                    log.info('upload type: normal')
                                    reusable.upload_object(
                                        s3_object_name, bucket, TEST_DATA_PATH,
                                        config, each_user)
                                #deleting the local file created after upload
                                if config.local_file_delete is True:
                                    log.info(
                                        'deleting local file created after the upload'
                                    )
                                    utils.exec_shell_cmd('rm -rf %s' %
                                                         s3_object_path)

                # listing bucket with only pseudo directories ; Bug allows ordered bucket listing to get stuck -- 4.1 https://bugzilla.redhat.com/show_bug.cgi?id=1853052#c0
                if config.test_ops['create_object'] is False:
                    if config.test_ops[
                            'object_structure'] == 'pseudo-dir-only':
                        log.info(
                            f'pseudo directories to create {config.pseudo_dir_count}'
                        )
                        for count in range(config.pseudo_dir_count):
                            s3_pseudo_dir_name = utils.gen_s3_object_name(
                                bucket_name_to_create, count)
                            utils.create_psuedo_dir(s3_pseudo_dir_name, bucket)

                # radoslist listing of the bucket
                if config.test_ops['radoslist'] is True:
                    log.info(
                        'executing the command radosgw-admin bucket radoslist '
                    )
                    radoslist = utils.exec_shell_cmd(
                        "radosgw-admin bucket radoslist --bucket %s" %
                        bucket_name_to_create)
                    if radoslist is False:
                        raise TestExecError(
                            "Radoslist command execution failed")

                # get the configuration parameter
                cmd = 'ceph daemon `ls -t /var/run/ceph/ceph-client.rgw.*.asok|head -1` config show |grep  rgw_bucket_index_max_aio'
                max_aio_output = utils.exec_shell_cmd(cmd)
                max_aio = max_aio_output.split()[1]

                # bucket stats to get the num_objects of the bucket
                bucket_stats = utils.exec_shell_cmd(
                    "radosgw-admin bucket stats --bucket  %s" %
                    bucket_name_to_create)
                bucket_stats_json = json.loads(bucket_stats)
                bkt_num_objects = bucket_stats_json['usage']['rgw.main'][
                    'num_objects']

                # ordered listing via radosgw-admin command and noting time taken
                log.info(
                    'measure the execution time taken to list via radosgw-admin command'
                )
                if config.test_ops['radosgw_listing_ordered'] is True:
                    log.info('ordered listing via radosgw-admin command')
                    rgw_cmd_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'ordered')
                    if rgw_cmd_time > 0:
                        rgw_cmd_time_secs = "{:.4f}".format(rgw_cmd_time)
                        rgw_cmd_time_mins = "{:.4f}".format(rgw_cmd_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for ordered listing of {bkt_num_objects} objects is : {rgw_cmd_time_secs} secs ; {rgw_cmd_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # unordered listing via radosgw-admin command and noting time taken
                if config.test_ops['radosgw_listing_ordered'] is False:
                    log.info('unordered listing via radosgw-admin command')
                    rgw_time = reusable.time_to_list_via_radosgw(
                        bucket_name_to_create, 'unordered')
                    if rgw_time > 0:
                        rgw_time_secs = "{:.4f}".format(rgw_time)
                        rgw_time_mins = "{:.4f}".format(rgw_time / 60)
                        log.info(
                            f'with rgw_bucket_index_max_aio = {max_aio} time taken for unordered listing of {bkt_num_objects} objects is : {rgw_time_secs} secs ; {rgw_time_mins} mins'
                        )
                    else:
                        raise TestExecError(
                            "object listing via radosgw-admin command failed")

                # listing via boto and noting the time taken
                log.info('measure the execution time taken to list via boto')
                boto_time = reusable.time_to_list_via_boto(
                    bucket_name_to_create, rgw_conn)
                if boto_time > 0:
                    boto_time_secs = "{:.4f}".format(boto_time)
                    boto_time_mins = "{:.4f}".format(boto_time / 60)
                    log.info(
                        f'with rgw_bucket_index_max_aio = {max_aio} time taken to list {bkt_num_objects} objects via boto : {boto_time_secs} secs ; {boto_time_mins} mins'
                    )
                else:
                    raise TestExecError("object listing via boto failed")

        if config.test_ops.get('delete_bucket_object', False):
            if config.test_ops.get('enable_version', False):
                for name, path in objects_created_list:
                    reusable.delete_version_object(bucket, name, path,
                                                   rgw_conn, each_user)
            else:
                reusable.delete_objects(bucket)
                time.sleep(30)
                reusable.delete_bucket(bucket)
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
    if config.user_remove is True:
        reusable.remove_user(each_user)