Example 1
    def test_check_object_integrity(self, rgw_endpoint, awscli_pod_session,
                                    rgw_bucket_factory, test_directory_setup):
        """
        Test object integrity using md5sum
        """
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        original_dir = AWSCLI_TEST_OBJ_DIR
        result_dir = test_directory_setup.result_dir
        full_object_path = f"s3://{bucketname}"
        downloaded_files = awscli_pod_session.exec_cmd_on_pod(
            f"ls -A1 {original_dir}").split(" ")
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod_session, original_dir,
                              full_object_path, obc_obj)

        logger.info("Downloading all objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod_session, full_object_path, result_dir,
                              obc_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparision between original and result object failed"
Example 2
 def test_bucket_creation(self, rgw_bucket_factory, amount, interface):
     """
     Test RGW OBC creation using the OC command.
     The factory checks the bucket's health by default.
     """
     obc = rgw_bucket_factory(amount, interface)[0]
     OBC(obc.name)
Example 3
    def test_empty_file_integrity(self, rgw_endpoint, awscli_pod_session,
                                  rgw_bucket_factory, test_directory_setup):
        """
        Test writing empty files to a bucket and check integrity
        """

        original_dir = test_directory_setup.origin_dir
        result_dir = test_directory_setup.result_dir
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        full_object_path = f"s3://{bucketname}"

        # Use touch to create 100 empty files in the pod
        command = f"for i in $(seq 1 100); do touch {original_dir}/test$i; done"
        awscli_pod_session.exec_sh_cmd_on_pod(command=command, sh="sh")
        # Write all empty objects to the new bucket
        sync_object_directory(awscli_pod_session, original_dir,
                              full_object_path, obc_obj)

        # Retrieve all objects from RGW bucket to result dir in Pod
        logger.info("Downloading objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod_session, full_object_path, result_dir,
                              obc_obj)

        # Checksum is compared between original and result object
        original_md5 = awscli_pod_session.exec_cmd_on_pod(
            f'sh -c "cat {original_dir}/* | md5sum"')
        result_md5 = awscli_pod_session.exec_cmd_on_pod(
            f'sh -c "cat {result_dir}/* | md5sum"')
        assert (original_md5 == result_md5
                ), "Origin and result folders checksum mismatch found"
Example 4
    def test_bucket_policy_actions(self, mcg_obj, bucket_factory):
        """
        Tests user access to Put, Get, Delete bucket policy actions
        """
        # Creating obc and obc object to get account details, keys etc
        obc_name = bucket_factory(amount=1, interface='OC')[0].name
        obc_obj = OBC(obc_name)

        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['PutBucketPolicy'],
            resources_list=[obc_obj.bucket_name])
        bucket_policy = json.dumps(bucket_policy_generated)

        # Admin creates a policy on the user bucket, for Action: PutBucketPolicy
        logger.info(
            f'Creating policy by admin on bucket: {obc_obj.bucket_name}')
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       bucket_policy)
        logger.info(f'Put bucket policy response from admin: {put_policy}')

        # Verify the user's PutBucketPolicy by changing the actions to GetBucketPolicy & DeleteBucketPolicy
        user_generated_policy = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['GetBucketPolicy', 'DeleteBucketPolicy'],
            resources_list=[obc_obj.bucket_name])
        bucket_policy1 = json.dumps(user_generated_policy)

        logger.info(
            f'Changing bucket policy by User on bucket: {obc_obj.bucket_name}')
        put_policy_user = put_bucket_policy(obc_obj, obc_obj.bucket_name,
                                            bucket_policy1)
        logger.info(f'Put bucket policy response from user: {put_policy_user}')

        # Verifying whether user can get the bucket policy after modification
        get_policy = get_bucket_policy(obc_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying that the user is not allowed to Put the bucket policy after modification
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to put the bucket policy'
        )
        try:
            put_bucket_policy(obc_obj, obc_obj.bucket_name, bucket_policy1)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info(
                    f'Put bucket policy has been denied access to the user: {obc_obj.obc_account}'
                )
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )

        # Verifying whether user can Delete the bucket policy after modification
        logger.info(f'Deleting bucket policy on bucket: {obc_obj.bucket_name}')
        delete_policy = delete_bucket_policy(obc_obj, obc_obj.bucket_name)
        logger.info(f'Delete policy response: {delete_policy}')
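
For reference, gen_bucket_policy assembles an S3 bucket policy document from user_list, actions_list, and resources_list. A plausible sketch of what it generates for the PutBucketPolicy case above, following the standard AWS policy grammar (the helper's exact output, such as key casing, may differ; Example 15 below parses lowercase keys back from NooBaa):

# Hypothetical illustration of the generated policy document; the real
# helper's output may differ in envelope fields and casing.
policy_document_sketch = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Statement",
            "Effect": "Allow",
            "Principal": {"AWS": [obc_obj.obc_account]},
            "Action": ["s3:PutBucketPolicy"],
            "Resource": [f"arn:aws:s3:::{obc_obj.bucket_name}"],
        }
    ],
}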
Example 5
    def test_bucket_delete_with_objects(self, rgw_endpoint, rgw_bucket_factory,
                                        interface, awscli_pod_session):
        """
        Negative test: deleting a bucket that has objects stored in it.
        """
        bucket = rgw_bucket_factory(1, interface)[0]
        bucketname = bucket.name
        obc_obj = OBC(bucketname)
        try:
            data_dir = AWSCLI_TEST_OBJ_DIR
            full_object_path = f"s3://{bucketname}"
            sync_object_directory(awscli_pod_session, data_dir,
                                  full_object_path, obc_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = obc_obj.s3_resource.Bucket(bucketname).delete()
                    assert (
                        not s3_del
                    ), "Unexpected issue: Successfully deleted a bucket containing objects via S3"
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(
                        err), "Couldn't verify delete non-empty OBC with s3"
                    logger.info(
                        f"Delete non-empty OBC {bucketname} failed as expected"
                    )
        finally:
            bucket.delete()
Example 6
    def test_check_object_integrity(self, awscli_pod, rgw_bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = rgw_bucket_factory(1, 'rgw-oc')[0].name
        obc_obj = OBC(bucketname)
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {result_dir}')
        # Retrieve a list of all objects on the test-objects bucket and
        # downloads them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(
            awscli_pod, original_dir
        )
        # Write all downloaded objects to the new bucket
        sync_object_directory(
            awscli_pod, original_dir, full_object_path, obc_obj
        )

        logger.info('Downloading all objects from RGW bucket to awscli pod')
        sync_object_directory(
            awscli_pod, full_object_path, result_dir, obc_obj
        )

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f'{original_dir}/{obj}',
                result_object_path=f'{result_dir}/{obj}', awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
Example 7
    def test_empty_file_integrity(
        self, awscli_pod, rgw_bucket_factory
    ):
        """
        Test writing empty files to a bucket and check integrity
        """
        original_dir = '/data'
        result_dir = "/result"
        bucketname = rgw_bucket_factory(1, 'rgw-oc')[0].name
        obc_obj = OBC(bucketname)
        full_object_path = f"s3://{bucketname}"

        # Use touch to create 100 empty files in the pod
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {original_dir} {result_dir}')
        command = "for i in $(seq 1 100); do touch /data/test$i; done"
        awscli_pod.exec_sh_cmd_on_pod(
            command=command,
            sh='sh'
        )
        # Write all empty objects to the new bucket
        sync_object_directory(
            awscli_pod, original_dir, full_object_path, obc_obj
        )

        # Retrieve all objects from RGW bucket to result dir in Pod
        logger.info('Downloading objects from RGW bucket to awscli pod')
        sync_object_directory(
            awscli_pod, full_object_path, result_dir, obc_obj
        )

        # Checksum is compared between original and result object
        original_md5 = awscli_pod.exec_cmd_on_pod(f'sh -c "cat {original_dir}/* | md5sum"')
        result_md5 = awscli_pod.exec_cmd_on_pod(f'sh -c "cat {result_dir}/* | md5sum"')
        assert original_md5 == result_md5, "Origin and result folders checksum mismatch found"
Example 8
    def test_multipart_with_policy(self, mcg_obj, bucket_factory):
        """
        Test Multipart upload with bucket policy set on the bucket
        """
        bucket = bucket_factory(interface="OC")[0].name
        obc_obj = OBC(bucket)
        part_body = "Random data-" + str(uuid.uuid4().hex)
        object_key = "MpuObjKey"

        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=[
                "ListBucketMultipartUploads",
                "ListMultipartUploadParts",
                "PutObject",
            ],
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
            effect="Allow",
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        # Creates and gets policy
        logger.info(f"Creating policy on bucket: {obc_obj.bucket_name}")
        put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)

        logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f"Initiating MP Upload on Bucket: {bucket} with Key {object_key}")
        upload_id = create_multipart_upload(obc_obj, bucket, object_key)
        logger.info(
            f"Listing the MP Upload : {list_multipart_upload(obc_obj, bucket)['Uploads']}"
        )

        # Uploading an individual part to the bucket
        logger.info(f"Uploading to the bucket: {bucket}")
        part_etag = obc_obj.s3_client.upload_part(
            Bucket=bucket,
            Key=object_key,
            Body=part_body,
            UploadId=upload_id,
            PartNumber=1,
        )["ETag"]

        # Listing the Uploaded part
        logger.info(
            f"Listing the individual part: {list_uploaded_parts(obc_obj, bucket, object_key, upload_id)['Parts']}"
        )
        uploaded_part = [{"ETag": part_etag, "PartNumber": 1}]

        # Completing the Multipart Upload
        logger.info(f"Completing the MP Upload with on bucket: {bucket}")
        complete_multipart_upload(obc_obj, bucket, object_key, upload_id,
                                  uploaded_part)
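
The multipart helpers used above wrap the boto3 client; expressed directly against a boto3 S3 client (standard boto3 API calls, shown for comparison, where s3 stands for a client such as obc_obj.s3_client), the same flow is roughly:

# Raw boto3 equivalent of the multipart flow above.
upload_id = s3.create_multipart_upload(Bucket=bucket, Key=object_key)["UploadId"]
part = s3.upload_part(
    Bucket=bucket,
    Key=object_key,
    Body=part_body,
    UploadId=upload_id,
    PartNumber=1,
)
s3.complete_multipart_upload(
    Bucket=bucket,
    Key=object_key,
    UploadId=upload_id,
    MultipartUpload={"Parts": [{"ETag": part["ETag"], "PartNumber": 1}]},
)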
Example 9
    def test_bucket_policy_verify_invalid_scenarios(
        self, mcg_obj, bucket_factory, policy_name, policy_param
    ):
        """
        Test invalid bucket policy scenarios
        """
        # Creating an OBC (account)
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)

        # Policy tests an invalid/non-existent principal, e.g. test-user
        if policy_name == "invalid_principal":
            bucket_policy_generated = gen_bucket_policy(
                user_list=policy_param,
                actions_list=["GetObject"],
                resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
                effect="Allow",
            )
            bucket_policy = json.dumps(bucket_policy_generated)

        # Policy tests an invalid/non-existent S3 action, e.g. GetContent
        elif policy_name == "invalid_action":
            bucket_policy_generated = gen_bucket_policy(
                user_list=obc_obj.obc_account,
                actions_list=[policy_param],
                resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
                effect="Allow",
            )
            bucket_policy = json.dumps(bucket_policy_generated)

        # Policy tests an invalid/non-existent resource/bucket, e.g. new_bucket
        elif policy_name == "invalid_resource":
            bucket_policy_generated = gen_bucket_policy(
                user_list=obc_obj.obc_account,
                actions_list=["GetObject"],
                resources_list=[policy_param],
                effect="Allow",
            )
            bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(f"Verifying Malformed Policy: {policy_name}")
        try:
            put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "MalformedPolicy":
                logger.info(
                    f"PutBucketPolicy failed due to: {response.error['Message']}"
                )
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
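
The try/except pattern above recurs throughout these examples; factored into one helper it would look roughly like the sketch below. It reuses the HttpResponseParser and UnexpectedBehaviour names from these tests, but assert_s3_error_code itself is hypothetical, not a framework function:

def assert_s3_error_code(client_error, expected_code):
    """Pass if the boto3 ClientError carries the expected S3 error code,
    otherwise raise UnexpectedBehaviour. Illustrative sketch only.
    """
    logger.info(client_error.response)
    response = HttpResponseParser(client_error.response)
    if response.error["Code"] == expected_code:
        logger.info(f"Request failed with the expected code: {expected_code}")
    else:
        raise UnexpectedBehaviour(
            f"{client_error.response} received invalid error code "
            f"{response.error['Code']}"
        )

With such a helper, each negative path reduces to catching boto3exception.ClientError and calling assert_s3_error_code(e, "MalformedPolicy") or assert_s3_error_code(e, "AccessDenied").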
Example 10
    def test_multipart_upload_operations(self, rgw_endpoint,
                                         awscli_pod_session,
                                         rgw_bucket_factory,
                                         test_directory_setup):
        """
        Test multipart upload operations on a bucket and verify the integrity of the downloaded object
        """
        bucket, key, origin_dir, res_dir, object_path, parts = setup(
            awscli_pod_session, rgw_bucket_factory, test_directory_setup)
        bucketname = bucket.name
        bucket = OBC(bucketname)

        # Abort all Multipart Uploads for this Bucket (optional, for starting over)
        logger.info(f"Aborting any Multipart Upload on bucket:{bucketname}")
        abort_all_multipart_upload(bucket, bucketname, key)

        # Create & list Multipart Upload on the Bucket
        logger.info(
            f"Initiating Multipart Upload on Bucket: {bucketname} with Key {key}"
        )
        upload_id = create_multipart_upload(bucket, bucketname, key)
        logger.info(
            f"Listing the Multipart Upload: {list_multipart_upload(bucket, bucketname)}"
        )

        # Uploading individual parts to the Bucket
        logger.info(f"Uploading individual parts to the bucket {bucketname}")
        uploaded_parts = upload_parts(bucket, awscli_pod_session, bucketname,
                                      key, res_dir, upload_id, parts)

        # Listing the Uploaded parts
        logger.info(
            f"Listing the individual parts: {list_uploaded_parts(bucket, bucketname, key, upload_id)}"
        )

        # Completing the Multipart Upload
        logger.info(f"Completing the Multipart Upload on bucket: {bucketname}")
        logger.info(
            complete_multipart_upload(bucket, bucketname, key, upload_id,
                                      uploaded_parts))

        # Checksum Validation: Downloading the object after completing Multipart Upload and verifying its integrity
        logger.info(
            "Downloading the completed multipart object from the RGW bucket to the awscli pod"
        )
        sync_object_directory(awscli_pod_session, object_path, res_dir, bucket)
        assert verify_s3_object_integrity(
            original_object_path=f"{origin_dir}/{key}",
            result_object_path=f"{res_dir}/{key}",
            awscli_pod=awscli_pod_session,
        ), "Checksum comparision between original and result object failed"
Example 11
    def test_object_actions(self, mcg_obj, bucket_factory):
        """
        Test to verify different object actions and cross account access to buckets
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)

        # Creating noobaa account to access bucket belonging to obc account
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"
        user = NoobaaAccount(
            mcg_obj, name=user_name, email=email, buckets=[obc_obj.bucket_name]
        )

        # Admin sets policy on obc bucket with obc account principal
        bucket_policy_generated = gen_bucket_policy(
            user_list=[obc_obj.obc_account, user.email_id],
            actions_list=["PutObject"]
            if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
            else ["GetObject", "DeleteObject"],
            effect="Allow"
            if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
            else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether users can put object
        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {obc_obj.obc_account}"
        )
        assert s3_put_object(
            obc_obj, obc_obj.bucket_name, object_key, data
        ), "Failed: Put Object"

        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {user.email_id}"
        )
        assert s3_put_object(
            user, obc_obj.bucket_name, object_key, data
        ), "Failed: Put Object"

        # Verifying whether Get action is not allowed
        logger.info(
            f"Verifying whether user: "******"ocs_version"]) >= 4.6 else obc_obj.obc_account}'
            f" is denied to Get object"
        )
        try:
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
                s3_get_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Get Object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Get object succeeded when it should have failed"

        if version.get_semantic_ocs_version_from_config() == version.VERSION_4_6:
            logger.info(
                f"Verifying whether the user: "******"{obc_obj.obc_account} is able to access Get action"
                f"irrespective of the policy set"
            )
            assert s3_get_object(
                obc_obj, obc_obj.bucket_name, object_key
            ), "Failed: Get Object"

        # Verifying whether obc account allowed to create multipart
        logger.info(
            f"Creating multipart on bucket: {obc_obj.bucket_name}"
            f" with key: {object_key} using user: {obc_obj.obc_account}"
        )
        create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

        # Verifying whether S3 user is allowed to create multipart
        logger.info(
            f"Creating multipart on bucket: {obc_obj.bucket_name} "
            f"with key: {object_key} using user: {user.email_id}"
        )
        create_multipart_upload(user, obc_obj.bucket_name, object_key)

        # Verifying whether obc account is denied access to delete object
        logger.info(
            f"Verifying whether user: "******"ocs_version"]) >= 4.6 else obc_obj.obc_account}'
            f"is denied to Delete object"
        )
        try:
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
                s3_delete_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Delete action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Delete object succeeded when it should have failed"

        # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
        new_policy_generated = gen_bucket_policy(
            user_list=[user.email_id],
            actions_list=["GetObject", "DeleteObject"]
            if float(config.ENV_DATA["ocs_version"]) <= 4.6
            else ["PutObject"],
            effect="Allow"
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6
            else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        new_policy = json.dumps(new_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, new_policy)
        logger.info(f"Put bucket policy response from admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get, Delete object is allowed
        logger.info(
            f"Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for get_resp in TimeoutSampler(
            30, 4, s3_get_object, user, obc_obj.bucket_name, object_key
        ):
            if "403" not in str(get_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("GetObj operation successful")
                break
            else:
                logger.info("GetObj operation is denied access")
        logger.info(
            f"Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for del_resp in TimeoutSampler(
            30, 4, s3_delete_object, user, obc_obj.bucket_name, object_key
        ):
            if "403" not in str(del_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("DeleteObj operation successful")
                break
            else:
                logger.info("DeleteObj operation is denied access")

        # Verifying whether Put object action is denied
        logger.info(
            f"Verifying whether user: {user.email_id} is denied to Put object after updating policy"
        )
        try:
            s3_put_object(user, obc_obj.bucket_name, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Put object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
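
TimeoutSampler(timeout, sleep, func, *args) used above yields the result of calling func every few seconds until the caller breaks out on success or the timeout expires; a minimal sketch of that sampler pattern, assuming that call signature (the framework's class adds richer error handling):

import time


def timeout_sampler_sketch(timeout, sleep, func, *args, **kwargs):
    """Yield func(*args, **kwargs) every `sleep` seconds until `timeout`.

    Simplified illustration of the TimeoutSampler pattern used above.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        yield func(*args, **kwargs)
        time.sleep(sleep)
    raise TimeoutError(f"Timed out after {timeout}s waiting on {func.__name__}")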
Example 12
    def test_bucket_website_and_policies(self, mcg_obj, bucket_factory):
        """
        Tests bucket website and bucket policy actions
        """
        # Creating an OBC (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin sets policy with Put/Get bucket website actions
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=bucket_website_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
            effect="Allow")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy for bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(f"Adding bucket website config to: {obc_obj.bucket_name}")
        assert s3_put_bucket_website(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name,
            website_config=website_config), "Failed: PutBucketWebsite"
        logger.info(
            f"Getting bucket website config from: {obc_obj.bucket_name}")
        assert s3_get_bucket_website(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name), "Failed: GetBucketWebsite"

        logger.info("Writing index and error data to the bucket")
        assert s3_put_object(s3_obj=obc_obj,
                             bucketname=obc_obj.bucket_name,
                             object_key="index.html",
                             data=index,
                             content_type='text/html'), "Failed: PutObject"
        assert s3_put_object(s3_obj=obc_obj,
                             bucketname=obc_obj.bucket_name,
                             object_key="error.html",
                             data=error,
                             content_type='text/html'), "Failed: PutObject"

        # Verifying whether DeleteBucketWebsite action is denied access
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to DeleteBucketWebsite'
        )
        try:
            s3_delete_bucket_website(s3_obj=obc_obj,
                                     bucketname=obc_obj.bucket_name)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('DeleteBucketWebsite action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )

        # Admin modifies policy to allow DeleteBucketWebsite action
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['DeleteBucketWebsite'],
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
            effect="Allow")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy for bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f"Deleting bucket website config from bucket: {obc_obj.bucket_name}"
        )
        assert s3_delete_bucket_website(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name), "Failed: DeleteBucketWebsite"
Example 13
    def test_object_actions(self, mcg_obj, bucket_factory):
        """
        Test to verify different object actions and cross account access to buckets
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin sets policy on obc bucket with obc account principal
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['PutObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'])
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       bucket_policy)
        logger.info(f'Put bucket policy response from Admin: {put_policy}')

        # Get Policy
        logger.info(f'Getting Bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether obc account can put object
        logger.info(f'Adding object on bucket: {obc_obj.bucket_name}')
        assert s3_put_object(obc_obj, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        # Verifying whether Get action is not allowed
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Get object'
        )
        try:
            s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Get Object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )

        # Verifying whether obc account allowed to create multipart
        logger.info(
            f'Creating multipart on bucket: {obc_obj.bucket_name} with key: {object_key}'
        )
        create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

        # Verifying whether obc account is denied access to delete object
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Delete object'
        )
        try:
            s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Delete action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )

        # Creating noobaa account to access bucket belonging to obc account
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"
        user = NoobaaAccount(mcg_obj,
                             name=user_name,
                             email=email,
                             buckets=[obc_obj.bucket_name])

        # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
        new_policy_generated = gen_bucket_policy(
            user_list=user.email_id,
            actions_list=['GetObject', 'DeleteObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'])
        new_policy = json.dumps(new_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       new_policy)
        logger.info(f'Put bucket policy response from admin: {put_policy}')

        # Get Policy
        logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get, Delete object is allowed
        logger.info(
            f'Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
        )
        assert s3_get_object(user, obc_obj.bucket_name,
                             object_key), "Failed: Get Object"
        logger.info(
            f'Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
        )
        assert s3_delete_object(user, obc_obj.bucket_name,
                                object_key), "Failed: Delete Object"

        # Verifying whether Put object action is denied
        logger.info(
            f'Verifying whether user: {user.email_id} is denied to Put object after updating policy'
        )
        try:
            s3_put_object(user, obc_obj.bucket_name, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Put object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
Example 14
    def test_pvpool_cpu_and_memory_modifications(
        self,
        awscli_pod_session,
        backingstore_factory,
        bucket_factory,
        test_directory_setup,
        mcg_obj_session,
    ):
        """
        Test to modify the CPU and memory resource limits for the PV backingstore and verify that the change is reflected
        """
        bucketclass_dict = {
            "interface": "OC",
            "backingstore_dict": {
                "pv": [(
                    1,
                    MIN_PV_BACKINGSTORE_SIZE_IN_GB,
                    "ocs-storagecluster-ceph-rbd",
                )]
            },
        }
        bucket = bucket_factory(1, "OC", bucketclass=bucketclass_dict)[0]
        bucket_name = bucket.name
        pv_backingstore = bucket.bucketclass.backingstores[0]
        pv_bs_name = pv_backingstore.name
        pv_pod_label = f"pool={pv_bs_name}"
        pv_pod_info = get_pods_having_label(
            label=pv_pod_label,
            namespace=config.ENV_DATA["cluster_namespace"])[0]
        pv_pod_obj = Pod(**pv_pod_info)
        pv_pod_name = pv_pod_obj.name
        logger.info(
            f"Pod created for PV Backingstore {pv_bs_name}: {pv_pod_name}")
        new_cpu = "500m"
        new_mem = "500Mi"
        new_resource_patch = {
            "spec": {
                "pvPool": {
                    "resources": {
                        "limits": {
                            "cpu": f"{new_cpu}",
                            "memory": f"{new_mem}",
                        },
                        "requests": {
                            "cpu": f"{new_cpu}",
                            "memory": f"{new_mem}",
                        },
                    }
                }
            }
        }
        try:
            OCP(
                namespace=config.ENV_DATA["cluster_namespace"],
                kind="backingstore",
                resource_name=pv_bs_name,
            ).patch(params=json.dumps(new_resource_patch), format_type="merge")
        except CommandFailed as e:
            logger.error(f"[ERROR] Failed to patch: {e}")
        else:
            logger.info("Patched new resource limits")
        wait_for_pods_to_be_running(
            namespace=config.ENV_DATA["cluster_namespace"],
            pod_names=[pv_pod_name])
        pv_pod_ocp_obj = OCP(namespace=config.ENV_DATA["cluster_namespace"],
                             kind="pod").get(resource_name=pv_pod_name)
        resource_dict = pv_pod_ocp_obj["spec"]["containers"][0]["resources"]
        assert (
            resource_dict["limits"]["cpu"] == new_cpu
            and resource_dict["limits"]["memory"] == new_mem
            and resource_dict["requests"]["cpu"] == new_cpu
            and resource_dict["requests"]["memory"] == new_mem
        ), "New resource modification in Backingstore is not reflected in PV Backingstore Pod!!"
        logger.info(
            "Resource modification reflected in the PV Backingstore Pod!!")

        # push some data to the bucket
        file_dir = test_directory_setup.origin_dir
        copy_random_individual_objects(
            podobj=awscli_pod_session,
            file_dir=file_dir,
            target=f"s3://{bucket_name}",
            amount=1,
            s3_obj=OBC(bucket_name),
        )
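
The test above verifies the new limits on the pod spec; a complementary check would read the patched BackingStore CR itself through the same OCP interface. A hypothetical follow-up, reusing names from the test:

# Hypothetical follow-up: confirm the merge patch landed on the CR itself.
bs_cr = OCP(
    namespace=config.ENV_DATA["cluster_namespace"],
    kind="backingstore",
).get(resource_name=pv_bs_name)
pv_pool_resources = bs_cr["spec"]["pvPool"]["resources"]
assert pv_pool_resources["limits"]["cpu"] == new_cpu
assert pv_pool_resources["limits"]["memory"] == new_mem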
Example 15
    def test_basic_bucket_policy_operations(self, mcg_obj, bucket_factory):
        """
        Test add, modify, and delete of bucket policies
        """
        # Creating obc and obc object to get account details, keys etc
        obc_name = bucket_factory(amount=1, interface='OC')[0].name
        obc_obj = OBC(obc_name)

        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['GetObject'],
            resources_list=[obc_obj.bucket_name])
        bucket_policy = json.dumps(bucket_policy_generated)

        # Add Bucket Policy
        logger.info(f'Creating bucket policy on bucket: {obc_obj.bucket_name}')
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       bucket_policy)

        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                logger.info('Bucket policy has been created successfully')
            else:
                raise InvalidStatusCode(
                    f"Invalid Status code: {response.status_code}")
        else:
            raise NoBucketPolicyResponse("Put policy response is none")

        # Get bucket policy
        logger.info(f'Getting Bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Modifying bucket policy to take new policy
        logger.info('Modifying bucket policy')
        actions_list = ['ListBucket', 'CreateBucket']
        actions = list(map(lambda action: "s3:%s" % action, actions_list))

        modified_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=actions_list,
            resources_list=[obc_obj.bucket_name])
        bucket_policy_modified = json.dumps(modified_policy_generated)

        put_modified_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                                bucket_policy_modified)

        if put_modified_policy is not None:
            response = HttpResponseParser(put_modified_policy)
            if response.status_code == 200:
                logger.info('Bucket policy has been modified successfully')
            else:
                raise InvalidStatusCode(
                    f"Invalid Status code: {response.status_code}")
        else:
            raise NoBucketPolicyResponse(
                "Put modified policy response is none")

        # Get Modified Policy
        get_modified_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        modified_policy = json.loads(get_modified_policy['Policy'])
        logger.info(f'Got modified bucket policy: {modified_policy}')

        actions_from_modified_policy = modified_policy['statement'][0][
            'action']
        modified_actions = list(map(str, actions_from_modified_policy))
        initial_actions = list(map(str.lower, actions))
        logger.info(f'Actions from modified_policy: {modified_actions}')
        logger.info(f'User-provided actions: {initial_actions}')
        if modified_actions == initial_actions:
            logger.info("Modified actions and initial actions are same")
        else:
            raise UnexpectedBehaviour(
                'Modification Failed: Action lists are not identical')

        # Delete Policy
        logger.info(
            f'Delete bucket policy by admin on bucket: {obc_obj.bucket_name}')
        delete_policy = delete_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f'Delete policy response: {delete_policy}')

        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 204:
                logger.info('Bucket policy is deleted successfully')
            else:
                raise InvalidStatusCode(
                    f"Invalid Status code: {response.status_code}")
        else:
            raise NoBucketPolicyResponse("Delete policy response is none")

        # Confirming again by calling get_bucket_policy
        try:
            get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'NoSuchBucketPolicy':
                logger.info('Bucket policy has been deleted successfully')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
Example 16
    def test_obc_quota(
        self,
        awscli_pod_session,
        rgw_bucket_factory,
        test_directory_setup,
        mcg_obj_session,
        amount,
        interface,
        quota,
    ):
        """
        Test OBC quota feature
            * create OBC with some quota set
            * check if the quota works
            * change the quota
            * check if the new quota works
        """
        bucket_name = rgw_bucket_factory(amount, interface,
                                         quota=quota)[0].name
        obc_obj = OBC(bucket_name)
        full_bucket_path = f"s3://{bucket_name}"
        amount = int(quota["maxObjects"]) + 1
        test_dir = test_directory_setup.result_dir
        err_msg = "(QuotaExceeded)"
        try:
            copy_random_individual_objects(
                awscli_pod_session,
                pattern="object-",
                file_dir=test_dir,
                target=full_bucket_path,
                amount=amount,
                s3_obj=obc_obj,
                ignore_error=False,
            )
        except CommandFailed as e:
            if err_msg in e.args[0]:
                logger.info(f"Quota {quota} worked as expected!!")
            else:
                logger.error(
                    "ERROR: Copying objects to bucket failed unexpectedly!!")
        else:
            assert (
                False
            ), "Quota didnt work!! Since more than maximum number of objects were written to the bucket!"

        # Patch the OBC to change the quota
        new_quota = 4
        new_quota_str = '{"spec": {"additionalConfig":{"maxObjects": "4"}}}'
        cmd = f"patch obc {bucket_name} -p '{new_quota_str}' -n openshift-storage --type=merge"
        OCP().exec_oc_cmd(cmd)
        logger.info(f"Patched new quota to obc {bucket_name}")

        # check if the new quota applied works
        amount = new_quota - int(quota["maxObjects"])
        awscli_pod_session.exec_cmd_on_pod(f"mkdir -p {test_dir}")
        try:
            copy_random_individual_objects(
                awscli_pod_session,
                pattern="new-object-",
                file_dir=test_dir,
                target=full_bucket_path,
                amount=amount,
                s3_obj=obc_obj,
                ignore_error=False,
            )
        except CommandFailed as e:
            if err_msg in e.args[0]:
                assert False, f"New quota {new_quota_str} didn't get applied!!"
            else:
                logger.error("Copy objects to bucket failed unexpectedly!!")
        else:
            logger.info(f"New quota {new_quota_str} got applied!!")
Example 17
    def test_bucket_policy_multi_statement(self, mcg_obj, bucket_factory):
        """
        Tests multiple statements in a bucket policy
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        # Creating OBC (account) and Noobaa user account
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)
        noobaa_user = NoobaaAccount(mcg_obj,
                                    name=user_name,
                                    email=email,
                                    buckets=[obc_obj.bucket_name])
        accounts = [obc_obj, noobaa_user]

        # Statement_1 public read access to a bucket
        single_statement_policy = gen_bucket_policy(
            sid="statement-1",
            user_list=["*"],
            actions_list=['GetObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
            effect="Allow")

        # Additional statements; Statement_2 grants PutObject permission to a specific user,
        # Statement_3 denies the DeleteObject action for multiple users
        new_statements = {
            "statement_2": {
                'Action': 's3:PutObject',
                'Effect': 'Allow',
                'Principal': noobaa_user.email_id,
                'Resource': [f'arn:aws:s3:::{obc_obj.bucket_name}/{"*"}'],
                'Sid': 'Statement-2'
            },
            "statement_3": {
                'Action': 's3:DeleteObject',
                'Effect': 'Deny',
                'Principal': [obc_obj.obc_account, noobaa_user.email_id],
                'Resource': [f'arn:aws:s3:::{"*"}'],
                'Sid': 'Statement-3'
            }
        }

        for value in new_statements.values():
            single_statement_policy["Statement"].append(value)

        logger.info(f"New policy {single_statement_policy}")
        bucket_policy = json.dumps(single_statement_policy)

        # Creating Policy
        logger.info(
            f'Creating multi statement bucket policy on bucket: {obc_obj.bucket_name}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy "

        # Getting Policy
        logger.info(
            f'Getting multi statement bucket policy from bucket: {obc_obj.bucket_name}'
        )
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # NooBaa user writes an object to bucket
        logger.info(
            f'Writing object on bucket: {obc_obj.bucket_name} with User: {noobaa_user.email_id}'
        )
        assert s3_put_object(noobaa_user, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        # Verifying public read access
        logger.info(
            f'Reading object on bucket: {obc_obj.bucket_name} with User: {obc_obj.obc_account}'
        )
        assert s3_get_object(obc_obj, obc_obj.bucket_name,
                             object_key), "Failed: Get Object"

        # Verifying Delete object is denied on both Accounts
        for user in accounts:
            logger.info(
                f"Verifying whether S3:DeleteObject action is denied access for {user}"
            )
            try:
                s3_delete_object(user, obc_obj.bucket_name, object_key)
            except boto3exception.ClientError as e:
                logger.info(e.response)
                response = HttpResponseParser(e.response)
                if response.error['Code'] == 'AccessDenied':
                    logger.info(
                        f"DeleteObject failed due to: {response.error['Message']}"
                    )
                else:
                    raise UnexpectedBehaviour(
                        f"{e.response} received invalid error code {response.error['Code']}"
                    )
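
After the two statements are appended, the multi-statement document sent to PutBucketPolicy has roughly the shape below (an approximation, since the envelope produced by gen_bucket_policy is defined elsewhere):

# Approximate final multi-statement policy document.
multi_statement_policy_sketch = {
    "Statement": [
        {"Sid": "statement-1", "Effect": "Allow", "Principal": "*",
         "Action": "s3:GetObject",
         "Resource": [f"arn:aws:s3:::{obc_obj.bucket_name}/*"]},
        {"Sid": "Statement-2", "Effect": "Allow",
         "Principal": noobaa_user.email_id, "Action": "s3:PutObject",
         "Resource": [f"arn:aws:s3:::{obc_obj.bucket_name}/*"]},
        {"Sid": "Statement-3", "Effect": "Deny",
         "Principal": [obc_obj.obc_account, noobaa_user.email_id],
         "Action": "s3:DeleteObject", "Resource": ["arn:aws:s3:::*"]},
    ]
}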
Example 18
    def test_bucket_policy_effect_deny(self, mcg_obj, bucket_factory):
        """
        Tests explicit "Deny" effect on bucket policy actions
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin writes an object to bucket
        logger.info(
            f'Writing an object on bucket: {obc_obj.bucket_name} by Admin')
        assert s3_put_object(mcg_obj, obc_obj.bucket_name, object_key,
                             data), "Failed: PutObject"

        # Admin sets policy with Effect: Deny on obc bucket with obc-account principal
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['GetObject'],
            resources_list=[f'{obc_obj.bucket_name}/{object_key}'],
            effect="Deny")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(
            f'Getting bucket policy from bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get action is denied access
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to GetObject'
        )
        try:
            s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('GetObject action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )

        # Admin sets a new policy on same obc bucket with same account but with different action and resource
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['DeleteObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
            effect="Deny")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(f'Creating bucket policy on bucket: {obc_obj.bucket_name}')
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(
            f'Getting bucket policy from bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether delete action is denied
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Delete object'
        )
        try:
            s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Delete object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
Example 19
    def test_rgw_kafka_notifications(self, bucket_factory):
        """
        Test to verify rgw kafka notifications

        """
        # Get sc
        sc = default_storage_class(interface_type=constants.CEPHBLOCKPOOL)

        # Deploy amq cluster
        self.amq.setup_amq_cluster(sc.name)

        # Create topic
        self.kafka_topic = self.amq.create_kafka_topic()

        # Create Kafkadrop pod
        (
            self.kafkadrop_pod,
            self.kafkadrop_svc,
            self.kafkadrop_route,
        ) = self.amq.create_kafkadrop()

        # Get the kafkadrop route
        kafkadrop_host = self.kafkadrop_route.get().get("spec").get("host")

        # Create bucket
        bucketname = bucket_factory(amount=1, interface="RGW-OC")[0].name

        # Get RGW credentials
        rgw_obj = RGW()
        rgw_endpoint, access_key, secret_key = rgw_obj.get_credentials()

        # Clone notify repo
        notify_path = clone_notify()

        # Initialise an S3 resource for putting objects
        data = "A random string data to write on created rgw bucket"
        obc_obj = OBC(bucketname)
        s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=rgw_endpoint,
            aws_access_key_id=obc_obj.access_key_id,
            aws_secret_access_key=obc_obj.access_key,
        )
        s3_client = s3_resource.meta.client

        # Initialize notify command to run
        notify_cmd = (
            f"python {notify_path} -e {rgw_endpoint} -a {obc_obj.access_key_id} "
            f"-s {obc_obj.access_key} -b {bucketname} -ke {constants.KAFKA_ENDPOINT} -t {self.kafka_topic.name}"
        )
        log.info(f"Running cmd {notify_cmd}")

        # Put objects to bucket
        assert s3_client.put_object(Bucket=bucketname, Key="key-1",
                                    Body=data), "Failed: Put object: key-1"
        exec_cmd(notify_cmd)

        # Validate from the rgw logs that notifications are sent
        # and no errors are seen
        pattern = "ERROR: failed to create push endpoint"
        rgw_pod_obj = get_rgw_pods()
        rgw_log = get_pod_logs(pod_name=rgw_pod_obj[0].name, container="rgw")
        assert re.search(pattern=pattern, string=rgw_log) is None, (
            f"Error: {pattern} msg found in the rgw logs."
            f"Validate {pattern} found on rgw logs and also "
            f"rgw bucket notification is working correctly")
        assert s3_client.put_object(Bucket=bucketname, Key="key-2",
                                    Body=data), "Failed: Put object: key-2"
        exec_cmd(notify_cmd)

        # Validate message are received Kafka side using curl command
        # A temporary way to check from Kafka side, need to check from UI
        curl_command = (
            f"curl -X GET {kafkadrop_host}/topic/{self.kafka_topic.name} "
            "-H 'content-type: application/vnd.kafka.json.v2+json'")
        json_output = run_cmd(cmd=curl_command)
        new_string = json_output.split()
        messages = new_string[new_string.index("messages</td>") + 1]
        if messages.find("1") == -1:
            raise Exception(
                "Error: Messages are not recieved from Kafka side."
                "RGW bucket notification is not working as expected.")
Example no. 20
0
    def test_rgw_kafka_notifications(self, bucket_factory):
        """
        Test to verify rgw kafka notifications

        """
        # Get sc
        sc = default_storage_class(interface_type=constants.CEPHBLOCKPOOL)

        # Deploy amq cluster
        self.amq.setup_amq_cluster(sc.name)

        # Create topic
        self.kafka_topic = self.amq.create_kafka_topic()

        # Create Kafkadrop pod
        (
            self.kafkadrop_pod,
            self.kafkadrop_svc,
            self.kafkadrop_route,
        ) = self.amq.create_kafkadrop()

        # Get the kafkadrop route
        kafkadrop_host = self.kafkadrop_route.get().get("spec").get("host")

        # Create bucket
        bucketname = bucket_factory(amount=1, interface="RGW-OC")[0].name

        # Get RGW credentials
        rgw_obj = RGW()
        rgw_endpoint, access_key, secret_key = rgw_obj.get_credentials()

        # Clone notify repo
        notify_path = clone_notify()

        # Initialize an S3 resource for putting objects
        data = "A random string data to write on created rgw bucket"
        obc_obj = OBC(bucketname)
        s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=rgw_endpoint,
            aws_access_key_id=obc_obj.access_key_id,
            aws_secret_access_key=obc_obj.access_key,
        )
        s3_client = s3_resource.meta.client

        # Initialize notify command to run
        notify_cmd = (
            f"python {notify_path} -e {rgw_endpoint} -a {obc_obj.access_key_id} "
            f"-s {obc_obj.access_key} -b {bucketname} -ke {constants.KAFKA_ENDPOINT} -t {self.kafka_topic.name}"
        )
        log.info(f"Running cmd {notify_cmd}")

        # Put objects to bucket
        assert s3_client.put_object(Bucket=bucketname, Key="key-1",
                                    Body=data), "Failed: Put object: key-1"
        exec_cmd(notify_cmd)

        # Validate from the RGW logs that the notification was sent
        # and that no errors were raised
        pattern = "ERROR: failed to create push endpoint"
        rgw_pod_obj = get_rgw_pods()
        rgw_log = get_pod_logs(pod_name=rgw_pod_obj[0].name, container="rgw")
        assert re.search(pattern=pattern, string=rgw_log) is None, (
            f"Error: '{pattern}' found in the RGW logs. "
            f"Check the RGW logs and verify that RGW bucket "
            f"notifications are working correctly")
        assert s3_client.put_object(Bucket=bucketname, Key="key-2",
                                    Body=data), "Failed: Put object: key-2"
        exec_cmd(notify_cmd)

        # Validate messages are received on the Kafka side using a curl command
        # A temporary way to check from the Kafka side; should also be verified from the UI
        curl_command = (
            f"curl -X GET {kafkadrop_host}/topic/{self.kafka_topic.name} "
            "-H 'content-type: application/vnd.kafka.json.v2+json'")
        json_output = run_cmd(cmd=curl_command)
        new_string = json_output.split()
        messages = new_string[new_string.index("messages</td>") + 1]
        if messages.find("1") == -1:
            raise Exception(
                "Error: Messages are not received on the Kafka side. "
                "RGW bucket notification is not working as expected.")

        # Validate the timestamp events
        ocs_version = config.ENV_DATA["ocs_version"]
        if Version.coerce(ocs_version) >= Version.coerce("4.8"):
            cmd = (
                f"bin/kafka-console-consumer.sh --bootstrap-server {constants.KAFKA_ENDPOINT} "
                f"--topic {self.kafka_topic.name} --from-beginning --timeout-ms 20000"
            )
            pod_list = get_pod_name_by_pattern(
                pattern="my-cluster-zookeeper",
                namespace=constants.AMQ_NAMESPACE)
            zookeeper_obj = get_pod_obj(name=pod_list[0],
                                        namespace=constants.AMQ_NAMESPACE)
            event_obj = zookeeper_obj.exec_cmd_on_pod(command=cmd)
            log.info(f"Event obj: {event_obj}")
            event_time = event_obj.get("Records")[0].get("eventTime")
            format_string = "%Y-%m-%dT%H:%M:%S.%fZ"
            try:
                datetime.strptime(event_time, format_string)
            except ValueError as ef:
                log.error(
                    f"Timestamp event {event_time} doesn't match the pattern {format_string}"
                )
                raise ef

            log.info(
                f"Timestamp event {event_time} matches the pattern {format_string}"
            )
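The timestamp check above only asserts that eventTime parses with the expected strftime pattern. A self-contained sketch of the same validation, using a made-up record shaped like the S3 bucket-notification event the test reads from Kafka (field values are illustrative):

from datetime import datetime

# Hypothetical event record; only eventTime matters for this check.
record = {
    "eventName": "ObjectCreated:Put",
    "eventTime": "2021-06-01T12:34:56.789Z",
}

format_string = "%Y-%m-%dT%H:%M:%S.%fZ"
parsed = datetime.strptime(record["eventTime"], format_string)
print(f"eventTime {record['eventTime']} parsed as {parsed}")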
Example no. 21
0
    def test_bucket_versioning_and_policies(self, mcg_obj, bucket_factory):
        """
        Tests bucket and object versioning on NooBaa buckets and the related policy actions
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        object_versions = []

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin sets a policy on OBC bucket to allow versioning related actions
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=bucket_version_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/*'
            ])
        bucket_policy = json.dumps(bucket_policy_generated)

        # Creating policy
        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f'Enabling bucket versioning on {obc_obj.bucket_name} using User: {obc_obj.obc_account}'
        )
        assert s3_put_bucket_versioning(
            s3_obj=obc_obj, bucketname=obc_obj.bucket_name,
            status="Enabled"), "Failed: PutBucketVersioning"

        logger.info(
            f'Verifying whether versioning is enabled on bucket: {obc_obj.bucket_name}'
        )
        assert s3_get_bucket_versioning(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name), "Failed: GetBucketVersioning"

        # Admin modifies the policy to allow the obc-account to write/read/delete versioned objects
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=object_version_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/*'
            ])
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy for bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        for key in range(5):
            logger.info(f"Writing {key} version of {object_key}")
            obj = s3_put_object(s3_obj=obc_obj,
                                bucketname=obc_obj.bucket_name,
                                object_key=object_key,
                                data=data)
            object_versions.append(obj['VersionId'])

        for version in object_versions:
            logger.info(f"Reading version: {version} of {object_key}")
            assert s3_get_object(
                s3_obj=obc_obj,
                bucketname=obc_obj.bucket_name,
                object_key=object_key,
                versionid=version), f"Failed: To Read object {version}"
            logger.info(f"Deleting version: {version} of {object_key}")
            assert s3_delete_object(
                s3_obj=obc_obj,
                bucketname=obc_obj.bucket_name,
                object_key=object_key,
                versionid=version), f"Failed: To Delete object with {version}"

        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['PutBucketVersioning'],
            resources_list=[obc_obj.bucket_name])
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f"Suspending bucket versioning on {obc_obj.bucket_name} using User: {obc_obj.obc_account}"
        )
        assert s3_put_bucket_versioning(
            s3_obj=obc_obj, bucketname=obc_obj.bucket_name,
            status="Suspended"), "Failed: PutBucketVersioning"

        # Verifying whether the GetBucketVersioning action is denied access
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to GetBucketVersioning'
        )
        try:
            s3_get_bucket_versioning(s3_obj=obc_obj,
                                     bucketname=obc_obj.bucket_name)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('GetBucketVersioning action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            raise UnexpectedBehaviour(
                "GetBucketVersioning was unexpectedly allowed for the obc-account"
            )
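Outside the test harness, the same versioning round-trip can be reproduced with plain boto3. A minimal sketch, assuming placeholder endpoint and credentials (substitute the OBC's values):

import boto3

# Placeholder endpoint and credentials.
s3 = boto3.client(
    "s3",
    endpoint_url="https://s3.example.com",
    aws_access_key_id="ACCESS_KEY",
    aws_secret_access_key="SECRET_KEY",
)

bucket = "my-versioned-bucket"
s3.put_bucket_versioning(
    Bucket=bucket, VersioningConfiguration={"Status": "Enabled"}
)

# Each PUT of the same key creates a distinct version.
versions = []
for i in range(5):
    resp = s3.put_object(Bucket=bucket, Key="ObjKey", Body=f"revision {i}")
    versions.append(resp["VersionId"])

# Read, then delete, each specific version by its VersionId.
for vid in versions:
    s3.get_object(Bucket=bucket, Key="ObjKey", VersionId=vid)
    s3.delete_object(Bucket=bucket, Key="ObjKey", VersionId=vid)

s3.put_bucket_versioning(
    Bucket=bucket, VersioningConfiguration={"Status": "Suspended"}
)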