def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    # Adding caps for user1
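    # "roles=*" lets user1 create and manage IAM roles through the IAM endpoint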
    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    # user1 auth with iam_client
    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    # policy document
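    # the trust policy comes from the yaml config; the <user_name> placeholder is
    # replaced with user2's id so that user2 is the principal allowed to assume the role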
    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])
    log.info(policy_document)

    # role policy
    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")
    log.info(role_policy)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    # role creation happens here
    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    # Put role policy happening here
    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    # bucket creation operations now
    bucket_name = "testbucket" + user1["user_id"]

    # authenticating user1 for bucket creation operation
    auth = Auth(user1, ssl=config.ssl)
    user1_info = {
        "access_key": user1["access_key"],
        "secret_key": user1["secret_key"],
        "user_id": user1["user_id"],
    }
    s3_client_u1 = auth.do_auth()

    # bucket creation operation: user1 creates and owns the bucket that user2
    # will later access through the assumed role
    bucket = reusable.create_bucket(bucket_name, s3_client_u1, user1_info)

    # uploading objects to the bucket
    if config.test_ops["create_object"]:
        # uploading data
        log.info("s3 objects to create: %s" % config.objects_count)
        for oc, size in list(config.mapped_sizes.items()):
            config.obj_size = size
            s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
            log.info("s3 object name: %s" % s3_object_name)
            s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
            log.info("s3 object path: %s" % s3_object_path)
            if config.test_ops.get("upload_type") == "multipart":
                log.info("upload type: multipart")
                reusable.upload_mutipart_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )
            else:
                log.info("upload type: normal")
                reusable.upload_object(
                    s3_object_name,
                    bucket,
                    TEST_DATA_PATH,
                    config,
                    user1_info,
                )

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )
    log.info(assume_role_response)
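    # assume_role returns temporary credentials (AccessKeyId, SecretAccessKey,
    # SessionToken) that stay valid for DurationSeconds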

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }
    log.info("got the credentials after assume role")

    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client = s3client.do_auth_using_client()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)
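    # negative test: HeadObject on a key that does not exist should fail with
    # 404 NotFound; a 403 Forbidden would mean the role policy did not grant
    # the assumed role access to the bucket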

    unexisting_object = bucket_name + "_unexisting_object"
    try:
        response = s3_client.head_object(Bucket=bucket_name, Key=unexisting_object)
    except botocore.exceptions.ClientError as e:
        response_code = e.response["Error"]["Code"]
        log.info(response_code)
        if e.response["Error"]["Code"] == "404":
            log.info("404 Unexisting Object Not Found")
        elif e.response["Error"]["Code"] == "403":
            raise TestExecError("Error code : 403 - HeadObject operation: Forbidden")


def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, session_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])

    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)
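    # collect the created Bucket resources so the server-side copy test below
    # can reuse them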

    buckets_created = []

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc
            )
            log.info("creating bucket with name: %s" % bucket_name)
            bucket = reusable.create_bucket(
                bucket_name, s3_client_rgw, assumed_role_user_info
            )
            buckets_created.append(bucket)

        if config.test_ops["create_object"] is True:
            for bucket in buckets_created:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    if config.test_ops["server_side_copy"] is True:
        bucket1, bucket2 = buckets_created

        # copy object1 from bucket1 to bucket2 with the same name as in bucket1
        log.info("copying first object from bucket1 to bucket2")
        all_keys_in_buck1 = []
        for obj in bucket1.objects.all():
            all_keys_in_buck1.append(obj.key)
        copy_source = {"Bucket": bucket1.name, "Key": all_keys_in_buck1[0]}
        copy_object_name = all_keys_in_buck1[0] + "_copied_obj"
        log.info(f"copy object name: {copy_object_name}")
        bucket2.copy(copy_source, copy_object_name)

        # list the objects in bucket2
        log.info("listing all objects im bucket2 after copy")
        all_bucket2_objs = []
        for obj in bucket2.objects.all():
            log.info(obj.key)
            all_bucket2_objs.append(obj.key)

        # check for object existence in bucket2
        if copy_object_name in all_bucket2_objs:
            log.info("server side copy successful")
        else:
            raise TestExecError("server side copy operation was not successful")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")


# Example 3
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    session_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_sts_key,
                                     session_encryption_token)
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts,
                                     "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1)
    iam_client = auth.do_auth_iam_client()
    """
    TODO:
    policy_document and role_policy can be used valid dict types.
    need to explore on this. 
    """

    policy_document = (
        '{"Version":"2012-10-17",'
        '"Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/%s"]},'
        '"Action":["sts:AssumeRole"]}]}' % (user2["user_id"]))

    role_policy = ('{"Version":"2012-10-17",'
                   '"Statement":{"Effect":"Allow",'
                   '"Action":"s3:*",'
                   '"Resource":"arn:aws:s3:::*"}}')

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.
        format(user_id=user1["user_id"]))
    utils.exec_shell_cmd(add_caps_cmd)

    # log.info(policy_document)
    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        })
    write_user_info.add_user_info(user_info)

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc)
            log.info("creating bucket with name: %s" % bucket_name_to_create)
            bucket = reusable.create_bucket(bucket_name_to_create,
                                            s3_client_rgw,
                                            assumed_role_user_info)
            if config.test_ops["create_object"] is True:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH,
                                                  s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")