Example #1
    def test_bucket_policy_multi_statement(self, mcg_obj, bucket_factory):
        """
        Tests multiple statements in a bucket policy

        Builds a policy with a public GetObject statement, appends a
        user-scoped PutObject allow and a multi-principal DeleteObject
        deny, then verifies each statement is enforced.
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        # Creating OBC (account) and Noobaa user account
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)
        noobaa_user = NoobaaAccount(mcg_obj,
                                    name=user_name,
                                    email=email,
                                    buckets=[obc_obj.bucket_name])
        accounts = [obc_obj, noobaa_user]

        # Statement_1 public read access to a bucket
        single_statement_policy = gen_bucket_policy(
            sid="statement-1",
            user_list=["*"],
            actions_list=["GetObject"],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
            effect="Allow",
        )

        # Additional Statements; Statement_2 - PutObject permission on specific user
        # Statement_3 - Denying Permission to DeleteObject action for multiple Users
        new_statements = {
            "statement_2": {
                "Action": "s3:PutObject",
                "Effect": "Allow",
                "Principal": noobaa_user.email_id,
                "Resource": [f'arn:aws:s3:::{obc_obj.bucket_name}/{"*"}'],
                "Sid": "Statement-2",
            },
            "statement_3": {
                "Action": "s3:DeleteObject",
                "Effect": "Deny",
                "Principal": [obc_obj.obc_account, noobaa_user.email_id],
                "Resource": [f'arn:aws:s3:::{"*"}'],
                "Sid": "Statement-3",
            },
        }

        # The dict keys are only labels, so only the statement bodies are appended
        single_statement_policy["Statement"].extend(new_statements.values())

        logger.info(f"New policy {single_statement_policy}")
        bucket_policy = json.dumps(single_statement_policy)

        # Creating Policy
        logger.info(
            f"Creating multi statement bucket policy on bucket: {obc_obj.bucket_name}"
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy "

        # Getting Policy
        logger.info(
            f"Getting multi statement bucket policy from bucket: {obc_obj.bucket_name}"
        )
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # NooBaa user writes an object to bucket (statement_2 allows PutObject)
        logger.info(
            f"Writing object on bucket: {obc_obj.bucket_name} with User: {noobaa_user.email_id}"
        )
        assert s3_put_object(noobaa_user, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        # Verifying public read access (statement-1)
        logger.info(
            f"Reading object on bucket: {obc_obj.bucket_name} with User: {obc_obj.obc_account}"
        )
        assert s3_get_object(obc_obj, obc_obj.bucket_name,
                             object_key), "Failed: Get Object"

        # Verifying Delete object is denied on both Accounts (statement_3)
        for user in accounts:
            logger.info(
                f"Verifying whether S3:DeleteObject action is denied access for {user}"
            )
            try:
                s3_delete_object(user, obc_obj.bucket_name, object_key)
            except boto3exception.ClientError as e:
                logger.info(e.response)
                response = HttpResponseParser(e.response)
                if response.error["Code"] == "AccessDenied":
                    logger.info(
                        f"DeleteObject failed due to: {response.error['Message']}"
                    )
                else:
                    raise UnexpectedBehaviour(
                        f"{e.response} received invalid error code {response.error['Code']}"
                    )
            else:
                # Previously a successful delete passed silently; the Deny
                # statement must cause an AccessDenied error
                assert False, "DeleteObject succeeded when it should have been denied"
Example #2
    def test_mcg_namespace_object_versions(self, mcg_obj, cld_mgr,
                                           ns_resource_factory, bucket_factory,
                                           platform):
        """
        Test object versioning S3 operations on namespace buckets/resources.
        Validates put, get, delete object version operations

        """
        obj_versions = []
        version_key = "ObjKey-" + str(uuid.uuid4().hex)
        total_versions = 10
        # S3 client pointing directly at the AWS hub (needed for versioning
        # calls on the target resource bucket)
        aws_s3_resource = boto3.resource(
            "s3",
            endpoint_url=constants.MCG_NS_AWS_ENDPOINT,
            aws_access_key_id=cld_mgr.aws_client.access_key,
            aws_secret_access_key=cld_mgr.aws_client.secret_key,
        )

        namespace_res = ns_resource_factory(platform=platform)

        ns_bucket = bucket_factory(
            amount=1,
            interface="mcg-namespace",
            write_ns_resource=namespace_res[1],
            read_ns_resources=[namespace_res[1]],
        )[0].name
        aws_s3_client = aws_s3_resource.meta.client

        # Put, Get bucket versioning and verify
        logger.info(
            f"Enabling bucket versioning on resource bucket: {namespace_res[0]}"
        )
        assert bucket_utils.s3_put_bucket_versioning(
            s3_obj=mcg_obj,
            bucketname=namespace_res[0],
            status="Enabled",
            s3_client=aws_s3_client,
        ), "Failed: PutBucketVersioning"
        get_ver_res = bucket_utils.s3_get_bucket_versioning(
            s3_obj=mcg_obj,
            bucketname=namespace_res[0],
            s3_client=aws_s3_client)
        logger.info(
            f"Get and verify versioning on resource bucket: {namespace_res[0]}"
        )
        assert get_ver_res[
            "Status"] == "Enabled", "Versioning is not enabled on bucket"

        # Put, List, Get, Delete object version operations
        # range(1, total_versions + 1) so that exactly total_versions versions
        # are written (the previous range(1, total_versions) wrote one fewer)
        for i in range(1, total_versions + 1):
            logger.info(f"Writing version {i} of {version_key}")
            obj = bucket_utils.s3_put_object(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                object_key=version_key,
                data=OBJ_DATA,
            )
            obj_versions.append(obj["VersionId"])
        list_ver_resp = bucket_utils.s3_list_object_versions(
            s3_obj=mcg_obj, bucketname=ns_bucket)
        get_list_and_verify(list_ver_resp, obj_versions, "Versions")

        for ver in obj_versions:
            assert bucket_utils.s3_get_object(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                object_key=version_key,
                versionid=ver,
            ), f"Failed to Read object {ver}"
            assert bucket_utils.s3_delete_object(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                object_key=version_key,
                versionid=ver,
            ), f"Failed to Delete object with {ver}"
            logger.info(f"Get and delete version: {ver} of {namespace_res}")

        logger.info(f"Suspending versioning on: {namespace_res[0]}")
        assert bucket_utils.s3_put_bucket_versioning(
            s3_obj=mcg_obj,
            bucketname=namespace_res[0],
            status="Suspended",
            s3_client=aws_s3_client,
        ), "Failed: PutBucketVersioning"
        logger.info(
            f"Verifying versioning is suspended on: {namespace_res[0]}")
        get_version_response = bucket_utils.s3_get_bucket_versioning(
            s3_obj=mcg_obj,
            bucketname=namespace_res[0],
            s3_client=aws_s3_client)
        assert (get_version_response["Status"] == "Suspended"
                ), "Versioning is not suspended on bucket"
Example #3
    def test_bucket_policy_effect_deny(self, mcg_obj, bucket_factory):
        """
        Tests explicit "Deny" effect on bucket policy actions

        Verifies that a Deny policy blocks GetObject on a specific key,
        then that a second Deny policy blocks DeleteObject on all keys.
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating multiple obc user (account)
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)

        # Admin writes an object to bucket
        logger.info(
            f"Writing an object on bucket: {obc_obj.bucket_name} by Admin")
        assert s3_put_object(mcg_obj, obc_obj.bucket_name, object_key,
                             data), "Failed: PutObject"

        # Admin sets policy with Effect: Deny on obc bucket with obc-account principal
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=["GetObject"],
            resources_list=[f"{obc_obj.bucket_name}/{object_key}"],
            effect="Deny",
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(
            f"Getting bucket policy from bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get action is denied access
        logger.info(
            f"Verifying whether user: {obc_obj.obc_account} is denied to GetObject"
        )
        try:
            s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("GetObject action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            # The Deny policy must surface as an AccessDenied error
            assert False, "GetObject succeeded when it should have been denied"

        # Admin sets a new policy on same obc bucket with same account but with different action and resource
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=["DeleteObject"],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
            effect="Deny",
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(f"Creating bucket policy on bucket: {obc_obj.bucket_name}")
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(
            f"Getting bucket policy from bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether delete action is denied
        logger.info(
            f"Verifying whether user: {obc_obj.obc_account} is denied to Delete object"
        )
        try:
            s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("DeleteObject action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            # The Deny policy must surface as an AccessDenied error
            assert False, "DeleteObject succeeded when it should have been denied"
Example #4
    def test_bucket_versioning_and_policies(self, mcg_obj, bucket_factory):
        """
        Tests bucket and object versioning on Noobaa buckets and also its related actions

        The bucket policy is updated in three stages: bucket-versioning
        actions, object-version read/write/delete actions, and finally
        PutBucketVersioning only (after which GetBucketVersioning must be
        denied).
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        object_versions = []

        # Creating a OBC user (Account)
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)

        # Admin sets a policy on OBC bucket to allow versioning related actions
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=bucket_version_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        # Creating policy
        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin"
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f"Getting bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f"Enabling bucket versioning on {obc_obj.bucket_name} using User: {obc_obj.obc_account}"
        )
        assert s3_put_bucket_versioning(
            s3_obj=obc_obj, bucketname=obc_obj.bucket_name,
            status="Enabled"), "Failed: PutBucketVersioning"

        logger.info(
            f"Verifying whether versioning is enabled on bucket: {obc_obj.bucket_name}"
        )
        assert s3_get_bucket_versioning(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name), "Failed: GetBucketVersioning"

        # Admin modifies the policy to all obc-account to write/read/delete versioned objects
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=object_version_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin"
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f"Getting bucket policy for bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Write several versions of the same key and collect their version ids
        for key in range(5):
            logger.info(f"Writing {key} version of {object_key}")
            obj = s3_put_object(
                s3_obj=obc_obj,
                bucketname=obc_obj.bucket_name,
                object_key=object_key,
                data=data,
            )
            object_versions.append(obj["VersionId"])

        for obj_ver in object_versions:
            logger.info(f"Reading version: {obj_ver} of {object_key}")
            assert s3_get_object(
                s3_obj=obc_obj,
                bucketname=obc_obj.bucket_name,
                object_key=object_key,
                versionid=obj_ver,
            ), f"Failed: To Read object {obj_ver}"
            logger.info(f"Deleting version: {obj_ver} of {object_key}")
            assert s3_delete_object(
                s3_obj=obc_obj,
                bucketname=obc_obj.bucket_name,
                object_key=object_key,
                versionid=obj_ver,
            ), f"Failed: To Delete object with {obj_ver}"

        # Narrow the policy to PutBucketVersioning only; GetBucketVersioning
        # should then be denied for the obc account
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=["PutBucketVersioning"],
            resources_list=[obc_obj.bucket_name],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin"
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f"Getting bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f"Suspending bucket versioning on {obc_obj.bucket_name} using User: {obc_obj.obc_account}"
        )
        assert s3_put_bucket_versioning(
            s3_obj=obc_obj, bucketname=obc_obj.bucket_name,
            status="Suspended"), "Failed: PutBucketVersioning"

        # Verifying whether GetBucketVersion action is denied access
        logger.info(
            f"Verifying whether user: {obc_obj.obc_account} is denied to GetBucketVersion"
        )
        try:
            s3_get_bucket_versioning(s3_obj=obc_obj,
                                     bucketname=obc_obj.bucket_name)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("GetBucketVersioning action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            # The narrowed policy must cause an AccessDenied error
            assert False, "GetBucketVersioning succeeded when it should have been denied"
Example #5
    def test_object_actions(self, mcg_obj, bucket_factory):
        """
        Test to verify different object actions and cross account access to buckets

        NOTE(review): three log statements in this block were corrupted in
        the source (masked with "******") and have been reconstructed from
        the surrounding version-dependent logic — confirm wording against
        upstream.
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating multiple obc users (accounts)
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)

        # Creating noobaa account to access bucket belonging to obc account
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"
        user = NoobaaAccount(mcg_obj,
                             name=user_name,
                             email=email,
                             buckets=[obc_obj.bucket_name])

        # Admin sets policy on obc bucket with obc account principal.
        # On OCS <= 4.6 the policy allows PutObject; on newer versions it
        # denies Get/Delete instead.
        bucket_policy_generated = gen_bucket_policy(
            user_list=[obc_obj.obc_account, user.email_id],
            actions_list=["PutObject"]
            if version.get_semantic_ocs_version_from_config() <=
            version.VERSION_4_6 else ["GetObject", "DeleteObject"],
            effect="Allow" if version.get_semantic_ocs_version_from_config() <=
            version.VERSION_4_6 else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether users can put object
        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {obc_obj.obc_account}"
        )
        assert s3_put_object(obc_obj, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {user.email_id}"
        )
        assert s3_put_object(user, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        # Verifying whether Get action is not allowed
        logger.info(
            f"Verifying whether user: "
            f'{user.email_id if float(config.ENV_DATA["ocs_version"]) >= 4.6 else obc_obj.obc_account}'
            f" is denied to Get object")
        try:
            if version.get_semantic_ocs_version_from_config(
            ) >= version.VERSION_4_6:
                s3_get_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Get Object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Get object succeeded when it should have failed"

        if version.get_semantic_ocs_version_from_config(
        ) == version.VERSION_4_6:
            logger.info(f"Verifying whether the user: "
                        f"{obc_obj.obc_account} is able to access Get action"
                        f"irrespective of the policy set")
            assert s3_get_object(obc_obj, obc_obj.bucket_name,
                                 object_key), "Failed: Get Object"

        # Verifying whether obc account allowed to create multipart
        logger.info(
            f"Creating multipart on bucket: {obc_obj.bucket_name}"
            f" with key: {object_key} using user: {obc_obj.obc_account}")
        create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

        # Verifying whether S3 user is allowed to create multipart
        logger.info(f"Creating multipart on bucket: {obc_obj.bucket_name} "
                    f"with key: {object_key} using user: {user.email_id}")
        create_multipart_upload(user, obc_obj.bucket_name, object_key)

        # Verifying whether obc account is denied access to delete object
        logger.info(
            f"Verifying whether user: "
            f'{user.email_id if float(config.ENV_DATA["ocs_version"]) >= 4.6 else obc_obj.obc_account}'
            f" is denied to Delete object")
        try:
            if version.get_semantic_ocs_version_from_config(
            ) >= version.VERSION_4_6:
                s3_delete_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Delete action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Delete object succeeded when it should have failed"

        # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
        new_policy_generated = gen_bucket_policy(
            user_list=[user.email_id],
            actions_list=["GetObject", "DeleteObject"]
            if float(config.ENV_DATA["ocs_version"]) <= 4.6 else ["PutObject"],
            effect="Allow" if version.get_semantic_ocs_version_from_config() >=
            version.VERSION_4_6 else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        new_policy = json.dumps(new_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       new_policy)
        logger.info(f"Put bucket policy response from admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get, Delete object is allowed
        # (sampled because the policy update may take time to propagate)
        logger.info(
            f"Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for get_resp in TimeoutSampler(30, 4, s3_get_object, user,
                                       obc_obj.bucket_name, object_key):
            if "403" not in str(
                    get_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("GetObj operation successful")
                break
            else:
                logger.info("GetObj operation is denied access")
        logger.info(
            f"Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for del_resp in TimeoutSampler(30, 4, s3_delete_object, user,
                                       obc_obj.bucket_name, object_key):
            if "403" not in str(
                    del_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("DeleteObj operation successful")
                break
            else:
                logger.info("DeleteObj operation is denied access")

        # Verifying whether Put object action is denied
        logger.info(
            f"Verifying whether user: {user.email_id} is denied to Put object after updating policy"
        )
        try:
            s3_put_object(user, obc_obj.bucket_name, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Put object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
Example #6
    def test_mcg_cache_lifecycle(
        self, mcg_obj, cld_mgr, awscli_pod, bucket_factory, bucketclass_dict
    ):
        """
        Test MCG cache bucket lifecycle

        1. Create cache buckets on each namespace stores (RGW-OBC/OBC)
        2. Verify write operations cache and hub bucket
        3. Verify read/list operations on cache bucket and hub target
        4. Verify delete operation on buckets
        5. Delete multiple cache buckets with data still in ns store
        6. Recreate the cache buckets on ns store(with existing data) then read.

        """
        data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        # Pick hub credentials based on the namespacestore platform
        if (
            constants.RGW_PLATFORM
            in bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]
        ):
            s3_creds = {
                "access_key_id": cld_mgr.rgw_client.access_key,
                "access_key": cld_mgr.rgw_client.secret_key,
                "endpoint": cld_mgr.rgw_client.endpoint,
            }
            logger.info("RGW obc will be created as cache bucket")
            obc_interface = "rgw-oc"
        else:
            s3_creds = {
                "access_key_id": cld_mgr.aws_client.access_key,
                "access_key": cld_mgr.aws_client.secret_key,
                "endpoint": constants.MCG_NS_AWS_ENDPOINT,
                "region": config.ENV_DATA["region"],
            }
            logger.info("Noobaa obc will be created as cache bucket")
            obc_interface = bucketclass_dict["interface"]

        # Create the namespace resource and bucket
        ns_bucket = bucket_factory(
            interface=obc_interface,
            bucketclass=bucketclass_dict,
        )[0]
        logger.info(f"Cache bucket: {ns_bucket.name} created")
        target_bucket = ns_bucket.bucketclass.namespacestores[0].uls_name

        # Write to cache
        logger.info(f"Writing object on cache bucket: {ns_bucket.name}")
        assert s3_put_object(
            mcg_obj, ns_bucket.name, object_key, data
        ), "Failed: PutObject"
        wait_for_cache(mcg_obj, ns_bucket.name, [object_key])

        # Write to hub and read from cache
        logger.info("Setting up test files for upload")
        setup_base_objects(awscli_pod, amount=3)
        logger.info(f"Uploading objects to ns target: {target_bucket}")
        sync_object_directory(
            awscli_pod,
            src=MCG_NS_ORIGINAL_DIR,
            target=f"s3://{target_bucket}",
            signed_request_creds=s3_creds,
        )
        sync_object_directory(
            awscli_pod, f"s3://{ns_bucket.name}", MCG_NS_RESULT_DIR, mcg_obj
        )

        # Read cached object
        assert s3_get_object(mcg_obj, ns_bucket.name, object_key), "Failed: GetObject"

        # Read stale object(ttl expired)
        sleep(bucketclass_dict["namespace_policy_dict"]["ttl"] / 1000)
        logger.info(f"Get object on cache bucket: {ns_bucket.name}")
        assert s3_get_object(mcg_obj, ns_bucket.name, object_key), "Failed: GetObject"

        # List on cache bucket
        list_response = s3_list_objects_v1(s3_obj=mcg_obj, bucketname=ns_bucket.name)
        logger.info(f"Listed objects: {list_response}")

        # Delete object from cache bucket
        s3_delete_object(mcg_obj, ns_bucket.name, object_key)
        sleep(5)
        # Try to read deleted object; the read must fail
        try:
            s3_get_object(mcg_obj, ns_bucket.name, object_key)
        except boto3exception.ClientError:
            logger.info("object deleted successfully")
        else:
            # Previously a successful read passed silently
            raise UnexpectedBehaviour("Deleted object is still readable from cache")

        # Validate deletion on the hub
        if (
            constants.RGW_PLATFORM
            in bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]
        ):
            obj_list = list(
                cld_mgr.rgw_client.client.Bucket(target_bucket).objects.all()
            )
        else:
            obj_list = list(
                cld_mgr.aws_client.client.Bucket(target_bucket).objects.all()
            )
        # objects.all() yields ObjectSummary instances; compare against their
        # keys (the previous `object_key in obj_list` could never match)
        if object_key in [obj.key for obj in obj_list]:
            raise UnexpectedBehaviour("Object was not deleted from cache properly")

        # Recreate and validate object
        assert s3_put_object(
            mcg_obj, ns_bucket.name, object_key, data
        ), "Failed: PutObject"
        assert s3_get_object(mcg_obj, ns_bucket.name, object_key), "Failed: GetObject"

        logger.info(f"Deleting cache bucket {ns_bucket.name}")
        curr_ns_store = ns_bucket.bucketclass.namespacestores[0]
        ns_bucket.delete()
        new_bucket_class = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Cache",
                "ttl": 180000,
                "namespacestores": [curr_ns_store],
            },
            "placement_policy": {
                "tiers": [{"backingStores": [constants.DEFAULT_NOOBAA_BACKINGSTORE]}]
            },
        }
        logger.info(
            f"Recreating cache bucket {ns_bucket.name} using current hub: {target_bucket}"
        )
        ns_bucket = bucket_factory(
            interface=obc_interface,
            bucketclass=new_bucket_class,
        )[0]
        logger.info(
            f"Read existing data on hub: {target_bucket} through cache bucket: {ns_bucket.name}"
        )
        assert s3_get_object(mcg_obj, ns_bucket.name, object_key), "Failed: GetObject"
Example #7
    def test_mcg_namespace_lifecycle_crd(
        self, mcg_obj, cld_mgr, awscli_pod, bucket_factory, bucketclass_dict
    ):
        """
        Test MCG namespace resource/bucket lifecycle using CRDs

        1. Create namespace resources with CRDs
        2. Create namespace bucket with CRDs
        3. Set bucket policy on namespace bucket with a S3 user principal
        4. Verify bucket policy.
        5. Read/write directly on namespace resource target.
        6. Edit the namespace bucket
        7. Delete namespace resource and bucket

        """
        data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        # Select credentials for the cloud platform backing the namespacestore,
        # so direct (non-MCG) access to the target bucket works below.
        if (
            constants.RGW_PLATFORM
            in bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]
        ):
            s3_creds = {
                "access_key_id": cld_mgr.rgw_client.access_key,
                "access_key": cld_mgr.rgw_client.secret_key,
                "endpoint": cld_mgr.rgw_client.endpoint,
            }
        else:
            s3_creds = {
                "access_key_id": cld_mgr.aws_client.access_key,
                "access_key": cld_mgr.aws_client.secret_key,
                "endpoint": constants.MCG_NS_AWS_ENDPOINT,
                "region": config.ENV_DATA["region"],
            }

        # Noobaa s3 account details
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        # Create the namespace resource and bucket
        ns_bucket = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]
        aws_target_bucket = ns_bucket.bucketclass.namespacestores[0].uls_name
        logger.info(f"Namespace bucket: {ns_bucket.name} created")

        # Noobaa S3 account
        user = NoobaaAccount(
            mcg_obj, name=user_name, email=email, buckets=[ns_bucket.name]
        )
        logger.info(f"Noobaa account: {user.email_id} with S3 access created")

        # Deny only DeleteObject for this user; Get/Put remain allowed and are
        # verified below.
        bucket_policy_generated = gen_bucket_policy(
            user_list=[user.email_id],
            actions_list=["DeleteObject"],
            effect="Deny",
            resources_list=[f'{ns_bucket.name}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)
        logger.info(
            f"Creating bucket policy on bucket: {ns_bucket.name} "
            f"with principal: {user.email_id}"
        )
        put_policy = put_bucket_policy(mcg_obj, ns_bucket.name, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        # Getting Policy
        logger.info(f"Getting bucket policy on bucket: {ns_bucket.name}")
        get_policy = get_bucket_policy(mcg_obj, ns_bucket.name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # MCG admin writes an object to bucket
        logger.info(f"Writing object on bucket: {ns_bucket.name} by admin")
        assert s3_put_object(
            mcg_obj, ns_bucket.name, object_key, data
        ), "Failed: PutObject"

        # Verifying whether Get & Put object is allowed to S3 user
        logger.info(
            f"Get object action on namespace bucket: {ns_bucket.name}"
            f" with user: {user.email_id}"
        )
        assert s3_get_object(user, ns_bucket.name, object_key), "Failed: GetObject"
        logger.info(
            f"Put object action on namespace bucket: {ns_bucket.name}"
            f" with user: {user.email_id}"
        )
        assert s3_put_object(
            user, ns_bucket.name, object_key, data
        ), "Failed: PutObject"

        # Verifying whether Delete object action is denied
        logger.info(
            f"Verifying whether user: {user.email_id} "
            f"is denied to Delete object after updating policy"
        )
        try:
            s3_delete_object(user, ns_bucket.name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            # Only AccessDenied proves the Deny statement took effect; any
            # other error code means the policy did not behave as expected.
            if response.error["Code"] == "AccessDenied":
                logger.info("Delete object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code "
                    f"{response.error['Code']}"
                )
        else:
            assert (
                False
            ), "Delete object operation was granted access, when it should have denied"

        logger.info("Setting up test files for upload, to the bucket/resources")
        setup_base_objects(awscli_pod, amount=3)

        # Upload files directly to NS resources
        logger.info(
            f"Uploading objects directly to ns resource target: {aws_target_bucket}"
        )
        sync_object_directory(
            awscli_pod,
            src=MCG_NS_ORIGINAL_DIR,
            target=f"s3://{aws_target_bucket}",
            signed_request_creds=s3_creds,
        )

        # Read files directly from NS resources
        logger.info(
            f"Downloading objects directly from ns resource target: {aws_target_bucket}"
        )
        sync_object_directory(
            awscli_pod,
            src=f"s3://{aws_target_bucket}",
            target=MCG_NS_RESULT_DIR,
            signed_request_creds=s3_creds,
        )

        # Edit namespace bucket: point both read and write resources at the
        # same underlying target bucket.
        logger.info(f"Editing the namespace resource bucket: {ns_bucket.name}")
        namespace_bucket_update(
            mcg_obj,
            bucket_name=ns_bucket.name,
            read_resource=[aws_target_bucket],
            write_resource=aws_target_bucket,
        )

        # Verify Download after editing bucket
        logger.info(
            f"Downloading objects directly from ns bucket target: {ns_bucket.name}"
        )
        sync_object_directory(
            awscli_pod,
            src=f"s3://{ns_bucket.name}",
            target=MCG_NS_RESULT_DIR,
            s3_obj=mcg_obj,
        )

        # MCG namespace bucket delete
        logger.info(
            f"Deleting all objects on namespace resource bucket: {ns_bucket.name}"
        )
        rm_object_recursive(awscli_pod, ns_bucket.name, mcg_obj)

        # Namespace resource delete
        logger.info(f"Deleting the resource: {aws_target_bucket}")
        mcg_obj.delete_ns_resource(ns_resource_name=aws_target_bucket)