Example #1
0
    def test_multipart_with_policy(self, mcg_obj, bucket_factory):
        """
        Test multipart upload against a bucket that has a bucket policy set.

        The policy grants the OBC account only the S3 actions needed for a
        multipart upload (list uploads, list parts, put object); the test
        then runs a single-part multipart upload end to end to verify the
        policy does not block the flow.

        Args:
            mcg_obj: MCG object used for admin-level policy operations
            bucket_factory: Factory fixture that creates OC-backed buckets
        """
        bucket = bucket_factory(interface="OC")[0].name
        obc_obj = OBC(bucket)
        # Random payload used as the body of the single uploaded part
        part_body = "Random data-" + str(uuid.uuid4().hex)
        object_key = "MpuObjKey"

        # Allow only the multipart-related actions for the OBC account, on
        # both the bucket itself and every object inside it ("bucket/*")
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=[
                "ListBucketMultipartUploads",
                "ListMultipartUploadParts",
                "PutObject",
            ],
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
            effect="Allow",
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        # Create the policy as admin (via mcg_obj), then read it back
        logger.info(f"Creating policy on bucket: {obc_obj.bucket_name}")
        put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)

        logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Multipart operations below run as the OBC account (obc_obj), which
        # is exactly the principal the policy was written for
        logger.info(
            f"Initiating MP Upload on Bucket: {bucket} with Key {object_key}")
        upload_id = create_multipart_upload(obc_obj, bucket, object_key)
        logger.info(
            f"Listing the MP Upload : {list_multipart_upload(obc_obj, bucket)['Uploads']}"
        )

        # Uploading a single part (with the random body) to the bucket
        logger.info(f"Uploading to the bucket: {bucket}")
        part_etag = obc_obj.s3_client.upload_part(
            Bucket=bucket,
            Key=object_key,
            Body=part_body,
            UploadId=upload_id,
            PartNumber=1,
        )["ETag"]

        # Listing the uploaded part; its ETag/number feed the completion call
        logger.info(
            f"Listing the individual part: {list_uploaded_parts(obc_obj, bucket, object_key, upload_id)['Parts']}"
        )
        uploaded_part = [{"ETag": part_etag, "PartNumber": 1}]

        # Completing the Multipart Upload
        logger.info(f"Completing the MP Upload with on bucket: {bucket}")
        complete_multipart_upload(obc_obj, bucket, object_key, upload_id,
                                  uploaded_part)
Example #2
0
    def test_multipart_upload_operations(self, rgw_endpoint,
                                         awscli_pod_session,
                                         rgw_bucket_factory,
                                         test_directory_setup):
        """
        Run a full multipart-upload cycle on an RGW bucket and verify the
        integrity of the object that is downloaded afterwards.
        """
        (bucket, key, origin_dir, res_dir,
         object_path, parts) = setup(awscli_pod_session, rgw_bucket_factory,
                                     test_directory_setup)
        bucketname = bucket.name
        bucket = OBC(bucketname)

        # Start from a clean slate: abort any in-flight multipart uploads
        logger.info(f"Aborting any Multipart Upload on bucket:{bucketname}")
        abort_all_multipart_upload(bucket, bucketname, key)

        # Initiate a new multipart upload and show the in-progress listing
        logger.info(
            f"Initiating Multipart Upload on Bucket: {bucketname} with Key {key}"
        )
        upload_id = create_multipart_upload(bucket, bucketname, key)
        in_progress = list_multipart_upload(bucket, bucketname)
        logger.info(f"Listing the Multipart Upload: {in_progress}")

        # Push each prepared part to the bucket
        logger.info(f"Uploading individual parts to the bucket {bucketname}")
        uploaded_parts = upload_parts(bucket, awscli_pod_session, bucketname,
                                      key, res_dir, upload_id, parts)

        # Show the parts that were just uploaded
        part_listing = list_uploaded_parts(bucket, bucketname, key, upload_id)
        logger.info(f"Listing the individual parts: {part_listing}")

        # Assemble the uploaded parts into the final object
        logger.info(f"Completing the Multipart Upload on bucket: {bucketname}")
        completion = complete_multipart_upload(bucket, bucketname, key,
                                               upload_id, uploaded_parts)
        logger.info(completion)

        # Download the assembled object and compare checksums with the source
        logger.info(
            "Downloading the completed multipart object from the RGW bucket to the awscli pod"
        )
        sync_object_directory(awscli_pod_session, object_path, res_dir, bucket)
        assert verify_s3_object_integrity(
            original_object_path=f"{origin_dir}/{key}",
            result_object_path=f"{res_dir}/{key}",
            awscli_pod=awscli_pod_session,
        ), "Checksum comparision between original and result object failed"
Example #3
0
    def test_multipart_upload_operations(self, mcg_obj, awscli_pod,
                                         bucket_factory):
        """
        Exercise the complete multipart-upload flow on an MCG bucket and
        validate the downloaded object's checksum against the original.
        """
        (bucket, key, origin_dir, res_dir,
         object_path, parts) = setup(awscli_pod, bucket_factory)

        # Make sure no stale multipart uploads linger on the bucket
        logger.info(f'Aborting any Multipart Upload on bucket:{bucket}')
        abort_all_multipart_upload(mcg_obj, bucket, key)

        # Kick off a fresh multipart upload and show it in the listing
        logger.info(
            f'Initiating Multipart Upload on Bucket: {bucket} with Key {key}')
        upload_id = create_multipart_upload(mcg_obj, bucket, key)
        in_progress = list_multipart_upload(mcg_obj, bucket)
        logger.info(f'Listing the Multipart Upload : {in_progress}')

        # Push each prepared part to the bucket
        logger.info(f'Uploading individual parts to the bucket {bucket}')
        uploaded_parts = upload_parts(mcg_obj, awscli_pod, bucket, key,
                                      res_dir, upload_id, parts)

        # Show the parts that were just uploaded
        part_listing = list_uploaded_parts(mcg_obj, bucket, key, upload_id)
        logger.info(f'Listing the individual parts : {part_listing}')

        # Assemble the uploaded parts into the final object
        logger.info(f'Completing the Multipart Upload on bucket: {bucket}')
        completion = complete_multipart_upload(mcg_obj, bucket, key,
                                               upload_id, uploaded_parts)
        logger.info(completion)

        # Download the assembled object and compare checksums with the source
        logger.info(
            'Downloading the completed multipart object from MCG bucket to awscli pod'
        )
        sync_object_directory(awscli_pod, object_path, res_dir, mcg_obj)
        assert verify_s3_object_integrity(
            original_object_path=f'{origin_dir}/{key}',
            result_object_path=f'{res_dir}/{key}',
            awscli_pod=awscli_pod
        ), 'Checksum comparision between original and result object failed'
Example #4
0
    def test_multipart_with_no_body(self, mcg_obj, bucket_factory):
        """
        Verify that a multipart upload can be completed even when the single
        uploaded part carries no body at all.
        """
        bucket = bucket_factory(amount=1, interface="OC")[0].name
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Start a multipart upload and confirm it appears in the listing
        logger.info(
            f"Initiating Multipart Upload on Bucket: {bucket} with Key {object_key}"
        )
        upload_id = create_multipart_upload(mcg_obj, bucket, object_key)
        logger.info(
            f"Listing the Multipart Upload : {list_multipart_upload(mcg_obj, bucket)['Uploads']}"
        )

        # Upload part 1 with the Body argument omitted entirely
        logger.info(
            f"Uploading individual parts to the bucket: {bucket} with no body specified"
        )
        upload_response = mcg_obj.s3_client.upload_part(
            Bucket=bucket, Key=object_key, UploadId=upload_id, PartNumber=1
        )
        uploaded_part = [{"ETag": upload_response["ETag"], "PartNumber": 1}]

        # Show the body-less part that was just uploaded
        logger.info(
            f"Listing the individual part: {list_uploaded_parts(mcg_obj, bucket, object_key, upload_id)['Parts']}"
        )

        # Finish the multipart upload using the body-less part
        logger.info(
            f"Completing the Multipart Upload with a part and no body on bucket: {bucket}"
        )
        complete_multipart_upload(
            mcg_obj, bucket, object_key, upload_id, uploaded_part
        )
Example #5
0
    def test_mcg_namespace_mpu_crd(self, mcg_obj, awscli_pod, bucket_factory,
                                   bucketclass_dict):
        """
        Test multipart upload S3 operations on namespace buckets(created by CRDs)
        Validates create, upload, upload copy and list parts operations

        Flow:
            1. Create a namespace bucket from the given bucketclass.
            2. Initiate an MPU, upload a part *copy* from an existing object,
               list and then abort that MPU.
            3. Initiate a second MPU, upload real parts, validate the parts
               listing, complete the upload.
            4. Download the completed object and verify its checksum.

        Args:
            mcg_obj: MCG object used for all S3 operations
            awscli_pod: Pod used to stage and download test files
            bucket_factory: Factory fixture that creates the namespace bucket
            bucketclass_dict: Bucketclass spec, including the namespace-store
                platform (drives the Azure-specific skips below)
        """
        ns_buc = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]

        ns_bucket = ns_buc.name

        # s3:// URI used later as the download source for checksum validation
        object_path = f"s3://{ns_bucket}"

        logger.info(
            f"Setting up test files for mpu and aborting any mpu on bucket: {ns_bucket}"
        )
        mpu_key, origin_dir, res_dir, parts = multipart_setup(awscli_pod)
        bucket_utils.abort_all_multipart_upload(mcg_obj, ns_bucket, COPY_OBJ)

        # Initiate mpu, Upload part copy, List and Abort operations
        logger.info(
            f"Put object on bucket: {ns_bucket} to create a copy source")
        assert bucket_utils.s3_put_object(s3_obj=mcg_obj,
                                          bucketname=ns_bucket,
                                          object_key=ROOT_OBJ,
                                          data=OBJ_DATA), "Failed: PutObject"
        logger.info(
            f"Initiating mpu on bucket: {ns_bucket} with key {COPY_OBJ}")
        part_copy_id = bucket_utils.create_multipart_upload(
            mcg_obj, ns_bucket, COPY_OBJ)
        list_mpu_res = bucket_utils.list_multipart_upload(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
        # Upload-id validation is skipped on Azure-backed namespace stores —
        # presumably the in-progress MPU listing is not reliable there;
        # TODO(review): confirm the Azure limitation this guards against
        if (constants.AZURE_PLATFORM
                not in bucketclass_dict["namespace_policy_dict"]
            ["namespacestore_dict"]):
            logger.info(f"Listing in-progress mpu: {list_mpu_res}")
            assert (part_copy_id == list_mpu_res["Uploads"][0]["UploadId"]
                    ), "Invalid UploadId"

        # Upload part 1 of COPY_OBJ by copying from the ROOT_OBJ put above
        logger.info(f"Uploading a part copy to: {ns_bucket}")
        assert bucket_utils.s3_upload_part_copy(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            copy_source=f"/{ns_bucket}/{ROOT_OBJ}",
            object_key=COPY_OBJ,
            part_number=1,
            upload_id=part_copy_id,
        ), "Failed: upload part copy"

        # This first MPU is intentionally aborted, not completed — it only
        # exercises the initiate / part-copy / list / abort path
        logger.info(
            f"Aborting initiated multipart upload with id: {part_copy_id}")
        assert bucket_utils.abort_multipart(mcg_obj, ns_bucket, COPY_OBJ,
                                            part_copy_id), "Abort failed"

        # Initiate mpu, Upload part, List parts operations
        logger.info(
            f"Initiating Multipart Upload on Bucket: {ns_bucket} with Key: {mpu_key}"
        )
        mp_upload_id = bucket_utils.create_multipart_upload(
            mcg_obj, ns_bucket, mpu_key)

        list_mpu_res = bucket_utils.list_multipart_upload(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
        # Same Azure-platform skip as above for the second MPU's listing
        if (constants.AZURE_PLATFORM
                not in bucketclass_dict["namespace_policy_dict"]
            ["namespacestore_dict"]):
            logger.info(f"Listing multipart upload: {list_mpu_res}")
            assert (mp_upload_id == list_mpu_res["Uploads"][0]["UploadId"]
                    ), "Invalid UploadId"

        logger.info(f"Uploading individual parts to the bucket: {ns_bucket}")
        uploaded_parts = bucket_utils.upload_parts(
            mcg_obj=mcg_obj,
            awscli_pod=awscli_pod,
            bucketname=ns_bucket,
            object_key=mpu_key,
            body_path=res_dir,
            upload_id=mp_upload_id,
            uploaded_parts=parts,
        )
        list_parts_res = bucket_utils.list_uploaded_parts(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=mpu_key,
            upload_id=mp_upload_id,
        )
        logger.info(f"Listing individual parts: {list_parts_res['Parts']}")
        # Server-side parts listing must match what was uploaded, pairwise
        # by position (part number and ETag)
        for i, ele in enumerate(uploaded_parts):
            assert (ele["PartNumber"] == list_parts_res["Parts"][i]
                    ["PartNumber"]), "Invalid part_number"
            assert ele["ETag"] == list_parts_res["Parts"][i][
                "ETag"], "Invalid ETag"

        logger.info(f"Completing the Multipart Upload on bucket: {ns_bucket}")
        assert bucket_utils.complete_multipart_upload(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=mpu_key,
            upload_id=mp_upload_id,
            parts=uploaded_parts,
        ), "MPU did not complete"

        # Checksum validation after completing MPU
        logger.info(
            f"Downloading the completed multipart object from {ns_bucket} to aws-cli pod"
        )
        bucket_utils.sync_object_directory(podobj=awscli_pod,
                                           src=object_path,
                                           target=res_dir,
                                           s3_obj=mcg_obj)
        assert bucket_utils.verify_s3_object_integrity(
            original_object_path=f"{origin_dir}/{mpu_key}",
            result_object_path=f"{res_dir}/{mpu_key}",
            awscli_pod=awscli_pod,
        ), "Checksum comparision between original and result object failed"
Example #6
0
    def test_object_actions(self, mcg_obj, bucket_factory):
        """
        Test to verify different object actions and cross account access to buckets

        Flow:
            1. Admin grants the OBC account PutObject only; verify Put works
               while Get and Delete are denied.
            2. Admin re-scopes the policy to a new noobaa account with
               GetObject/DeleteObject (cross account access); verify
               Get/Delete work for that user while Put is denied.

        Args:
            mcg_obj: MCG object used for admin-level policy operations
            bucket_factory: Factory fixture that creates OC-backed buckets
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating multiple obc users (accounts)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin sets policy on obc bucket with obc account principal:
        # PutObject only, on every object in the bucket
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['PutObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'])
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       bucket_policy)
        logger.info(f'Put bucket policy response from Admin: {put_policy}')

        # Get Policy
        logger.info(f'Getting Bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether obc account can put object
        logger.info(f'Adding object on bucket: {obc_obj.bucket_name}')
        assert s3_put_object(obc_obj, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        # Verifying whether Get action is not allowed
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Get object'
        )
        try:
            s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Get Object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            # The policy only allows PutObject; a successful Get means the
            # policy was not enforced, so fail the test explicitly
            assert False, "Get object succeeded when it should have failed"

        # Verifying whether obc account allowed to create multipart
        logger.info(
            f'Creating multipart on bucket: {obc_obj.bucket_name} with key: {object_key}'
        )
        create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

        # Verifying whether obc account is denied access to delete object
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Delete object'
        )
        try:
            s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Delete action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            # Delete is not in the allowed actions; success here is a bug
            assert False, "Delete object succeeded when it should have failed"

        # Creating noobaa account to access bucket belonging to obc account
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"
        user = NoobaaAccount(mcg_obj,
                             name=user_name,
                             email=email,
                             buckets=[obc_obj.bucket_name])

        # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
        new_policy_generated = gen_bucket_policy(
            user_list=user.email_id,
            actions_list=['GetObject', 'DeleteObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'])
        new_policy = json.dumps(new_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       new_policy)
        logger.info(f'Put bucket policy response from admin: {put_policy}')

        # Get Policy
        logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get, Delete object is allowed
        logger.info(
            f'Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
        )
        assert s3_get_object(user, obc_obj.bucket_name,
                             object_key), "Failed: Get Object"
        logger.info(
            f'Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
        )
        assert s3_delete_object(user, obc_obj.bucket_name,
                                object_key), "Failed: Delete Object"

        # Verifying whether Put object action is denied
        logger.info(
            f'Verifying whether user: {user.email_id} is denied to Put object after updating policy'
        )
        try:
            s3_put_object(user, obc_obj.bucket_name, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Put object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            # New policy grants Get/Delete only; Put must not succeed
            assert False, "Put object succeeded when it should have failed"
Example #7
0
    def test_object_actions(self, mcg_obj, bucket_factory):
        """
        Test to verify different object actions and cross account access to buckets

        Depending on the OCS version, the initial policy is either an Allow
        on PutObject (<= 4.6) or a Deny on GetObject/DeleteObject (> 4.6);
        in both cases Put must succeed while Get and Delete are denied for
        the relevant principal. A second policy then flips the allowed /
        denied actions and the checks are repeated.

        Args:
            mcg_obj: MCG object used for admin-level policy operations
            bucket_factory: Factory fixture that creates OC-backed buckets
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating multiple obc users (accounts)
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)

        # Creating noobaa account to access bucket belonging to obc account
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"
        user = NoobaaAccount(
            mcg_obj, name=user_name, email=email, buckets=[obc_obj.bucket_name]
        )

        # Admin sets policy on obc bucket with obc account principal.
        # OCS <= 4.6: Allow PutObject; newer: Deny GetObject/DeleteObject
        bucket_policy_generated = gen_bucket_policy(
            user_list=[obc_obj.obc_account, user.email_id],
            actions_list=["PutObject"]
            if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
            else ["GetObject", "DeleteObject"],
            effect="Allow"
            if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
            else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether users can put object
        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {obc_obj.obc_account}"
        )
        assert s3_put_object(
            obc_obj, obc_obj.bucket_name, object_key, data
        ), "Failed: Put Object"

        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {user.email_id}"
        )
        assert s3_put_object(
            user, obc_obj.bucket_name, object_key, data
        ), "Failed: Put Object"

        # Verifying whether Get action is not allowed.
        # NOTE(review): the original log line here was corrupted by a
        # credential scrubber; reconstructed from the version-gated logic
        # below, using the same version API as the rest of the test.
        logger.info(
            f"Verifying whether user: "
            f"{user.email_id if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6 else obc_obj.obc_account}"
            f" is denied to Get object"
        )
        try:
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
                s3_get_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Get Object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Get object succeeded when it should have failed"

        # On exactly 4.6 the bucket owner retains Get access regardless of
        # the Deny policy applied to the other principal
        if version.get_semantic_ocs_version_from_config() == version.VERSION_4_6:
            logger.info(
                f"Verifying whether the user: {obc_obj.obc_account} is able to access Get action"
                f"irrespective of the policy set"
            )
            assert s3_get_object(
                obc_obj, obc_obj.bucket_name, object_key
            ), "Failed: Get Object"

        # Verifying whether obc account allowed to create multipart
        logger.info(
            f"Creating multipart on bucket: {obc_obj.bucket_name}"
            f" with key: {object_key} using user: {obc_obj.obc_account}"
        )
        create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

        # Verifying whether S3 user is allowed to create multipart
        logger.info(
            f"Creating multipart on bucket: {obc_obj.bucket_name} "
            f"with key: {object_key} using user: {user.email_id}"
        )
        create_multipart_upload(user, obc_obj.bucket_name, object_key)

        # Verifying whether the relevant account is denied access to delete
        # object (same scrubber-corrupted log line reconstructed as above)
        logger.info(
            f"Verifying whether user: "
            f"{user.email_id if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6 else obc_obj.obc_account}"
            f" is denied to Delete object"
        )
        try:
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
                s3_delete_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Delete action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Delete object succeeded when it should have failed"

        # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access).
        # Uses the same version API as the rest of the test (was a stray
        # float(config.ENV_DATA["ocs_version"]) comparison).
        new_policy_generated = gen_bucket_policy(
            user_list=[user.email_id],
            actions_list=["GetObject", "DeleteObject"]
            if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
            else ["PutObject"],
            effect="Allow"
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6
            else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        new_policy = json.dumps(new_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, new_policy)
        logger.info(f"Put bucket policy response from admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get, Delete object is allowed; sampled with a
        # timeout because the new policy may take a moment to propagate
        logger.info(
            f"Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for get_resp in TimeoutSampler(
            30, 4, s3_get_object, user, obc_obj.bucket_name, object_key
        ):
            if "403" not in str(get_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("GetObj operation successful")
                break
            else:
                logger.info("GetObj operation is denied access")
        logger.info(
            f"Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for del_resp in TimeoutSampler(
            30, 4, s3_delete_object, user, obc_obj.bucket_name, object_key
        ):
            if "403" not in str(del_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("DeleteObj operation successful")
                break
            else:
                logger.info("DeleteObj operation is denied access")

        # Verifying whether Put object action is denied
        logger.info(
            f"Verifying whether user: {user.email_id} is denied to Put object after updating policy"
        )
        try:
            s3_put_object(user, obc_obj.bucket_name, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Put object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            # Match the stricter pattern used by the earlier denial checks:
            # an un-denied Put must fail the test
            assert False, "Put object succeeded when it should have failed"