Code example #1
def setup_objects_to_list(mcg_obj, bucket_name, amount=100, prefix=""):
    """
    Puts large amount of objects to the bucket to list.

     Args:
        mcg_obj (obj): MCG object
        amount (int): Number of test objects to create
        bucket_name (str): Name of the bucket
        prefix (str): Name of the prefix

    Returns:
        Tuple: Returns tuple containing the keys, prefixes and index

    """
    object_keys = []
    object_prefixes = []
    for i in range(amount):
        obj_key = f"{prefix}-{i}/ObjKey-{i}"
        bucket_utils.s3_put_object(s3_obj=mcg_obj,
                                   bucketname=bucket_name,
                                   object_key=obj_key,
                                   data=OBJ_DATA)
        object_keys.append(obj_key)
        object_prefixes.append(f"{prefix}-{i}/")
    mid_index = len(object_keys) // 2
    return object_keys, object_prefixes, mid_index
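
For context, here is a minimal sketch of what a helper such as bucket_utils.s3_put_object presumably wraps, assuming a plain boto3 client (the endpoint and credentials below are illustrative placeholders, not values from ocs-ci):

import boto3

# Hypothetical client setup; ocs-ci derives these values from the MCG object.
s3_client = boto3.client(
    "s3",
    endpoint_url="https://s3.example.com",   # assumed endpoint
    aws_access_key_id="ACCESS_KEY",          # assumed credentials
    aws_secret_access_key="SECRET_KEY",
)

# Rough equivalent of s3_put_object(s3_obj, bucketname, object_key, data)
s3_client.put_object(Bucket="my-bucket", Key="Drive-0/ObjKey-0", Body="object data")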
Code example #2
    def test_content_encoding_with_write(self, file_setup, bucket_factory,
                                         mcg_obj_session):
        """
        Test the S3 put object operation to verify that the content-encoding
        is stored as object metadata after the put
        """
        # create bucket
        bucket_name = bucket_factory()[0].name
        logger.info(f"Bucket created {bucket_name}")

        # create a random file and then zip it
        filename = file_setup
        logger.info(f"Random zip file generated : {filename}")

        # put object to the bucket created
        s3_put_object(
            s3_obj=mcg_obj_session,
            bucketname=bucket_name,
            object_key=f"{filename}",
            data=f"{filename}",
            content_encoding="zip",
        )

        # head object to see if the content-encoding is preserved
        head_obj = s3_head_object(s3_obj=mcg_obj_session,
                                  bucketname=bucket_name,
                                  object_key=f"{filename}")
        assert (head_obj["ContentEncoding"] == "zip"
                ), "Put object operation doesn't store ContentEncoding!!"
        logger.info(
            "Put object operation is preserving ContentEncoding as a object metadata"
        )
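
The same round trip can be expressed directly against boto3, which makes it easier to see where the Content-Encoding value travels; this is a sketch reusing the assumed s3_client from the earlier sketch, not the ocs-ci helpers:

# Put an object with an explicit Content-Encoding value...
s3_client.put_object(
    Bucket="my-bucket",
    Key="archive.zip",
    Body=b"zipped bytes",
    ContentEncoding="zip",
)

# ...and confirm HEAD echoes it back as object metadata.
head = s3_client.head_object(Bucket="my-bucket", Key="archive.zip")
assert head["ContentEncoding"] == "zip"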
Code example #3
    def test_object_expiration(self, mcg_obj, bucket_factory):
        """
        Test that an object set to expire in a day is not deleted within minutes

        """
        # Creating S3 bucket
        bucket = bucket_factory()[0].name
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        obj_data = "Random data" + str(uuid.uuid4().hex)
        # 4.10 dict to be removed once BZ 2091509 is fixed
        expire_rule_4_10 = {
            "Rules": [
                {
                    "Expiration": {"Days": 1, "ExpiredObjectDeleteMarker": False},
                    "ID": "data-expire",
                    "Prefix": "",
                    "Status": "Enabled",
                }
            ]
        }
        expire_rule = {
            "Rules": [
                {
                    "Expiration": {"Days": 1, "ExpiredObjectDeleteMarker": False},
                    "Filter": {"Prefix": ""},
                    "ID": "data-expire",
                    "Status": "Enabled",
                }
            ]
        }

        logger.info(f"Setting object expiration on bucket: {bucket}")
        if version.get_semantic_ocs_version_from_config() < version.VERSION_4_11:
            mcg_obj.s3_client.put_bucket_lifecycle_configuration(
                Bucket=bucket, LifecycleConfiguration=expire_rule_4_10
            )
        else:
            mcg_obj.s3_client.put_bucket_lifecycle_configuration(
                Bucket=bucket, LifecycleConfiguration=expire_rule
            )

        logger.info(f"Getting object expiration configuration from bucket: {bucket}")
        logger.info(
            f"Got configuration: {mcg_obj.s3_client.get_bucket_lifecycle_configuration(Bucket=bucket)}"
        )

        logger.info(f"Writing {object_key} to bucket: {bucket}")
        assert s3_put_object(
            s3_obj=mcg_obj, bucketname=bucket, object_key=object_key, data=obj_data
        ), "Failed: Put Object"

        logger.info("Sleeping for 90 seconds")
        sleep(90)

        logger.info(f"Getting {object_key} from bucket: {bucket} after 90 seconds")
        assert s3_get_object(
            s3_obj=mcg_obj, bucketname=bucket, object_key=object_key
        ), "Failed: Get Object"
Code example #4
    def create_obc_creation(self, bucket_factory, mcg_obj, key):
        """
        Creates an OBC-backed bucket, then writes and reads an object

        """
        # Create a bucket, then write & read an object
        bucket_name = bucket_factory(amount=1, interface="OC")[0].name
        obj_data = "A random string data"
        assert s3_put_object(mcg_obj, bucket_name, key,
                             obj_data), f"Failed: Put object, {key}"
        assert s3_get_object(mcg_obj, bucket_name,
                             key), f"Failed: Get object, {key}"
Code example #5
    def test_anonymous_read_only(self, mcg_obj, bucket_factory):
        """
        Tests read only access by an anonymous user
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        # Creating an S3 bucket
        s3_bucket = bucket_factory(amount=1, interface="S3")[0]

        # Creating a random user account
        user = NoobaaAccount(
            mcg_obj, name=user_name, email=email, buckets=[s3_bucket.name]
        )

        # Admin sets policy for all users '*' (public access)
        bucket_policy_generated = gen_bucket_policy(
            user_list=["*"],
            actions_list=["GetObject"],
            resources_list=[f'{s3_bucket.name}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {s3_bucket.name} with wildcard (*) Principal"
        )
        put_policy = put_bucket_policy(mcg_obj, s3_bucket.name, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        # Getting Policy
        logger.info(f"Getting bucket policy on bucket: {s3_bucket.name}")
        get_policy = get_bucket_policy(mcg_obj, s3_bucket.name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Admin writes an object to bucket
        logger.info(f"Writing object on bucket: {s3_bucket.name} by admin")
        assert s3_put_object(
            mcg_obj, s3_bucket.name, object_key, data
        ), "Failed: PutObject"

        # Reading the object by anonymous user
        logger.info(
            f"Getting object by user: {user.email_id} on bucket: {s3_bucket.name} "
        )
        assert s3_get_object(
            user, s3_bucket.name, object_key
        ), f"Failed: Get Object by user {user.email_id}"
Code example #6
File: sanity_helpers.py Project: yosibsh/ocs-ci
    def obc_put_obj_create_delete(self, mcg_obj, bucket_factory):
        """
        Creates a bucket, then writes, reads and deletes objects

        """
        bucket_name = bucket_factory(amount=1, interface="OC")[0].name
        self.obj_data = "A string data"

        for i in range(0, 30):
            key = "Object-key-" + f"{i}"
            logger.info(f"Write, read and delete object with key: {key}")
            assert s3_put_object(mcg_obj, bucket_name, key,
                                 self.obj_data), f"Failed: Put object, {key}"
            assert s3_get_object(mcg_obj, bucket_name,
                                 key), f"Failed: Get object, {key}"
            assert s3_delete_object(mcg_obj, bucket_name,
                                    key), f"Failed: Delete object, {key}"
Code example #7
    def mon_recovery_setup(
        self,
        dc_pod_factory,
        mcg_obj,
        bucket_factory,
    ):
        """
        Creates a project, PVCs, DC pods and OBCs

        """
        self.filename = "sample_file.txt"
        self.object_key = "obj-key"
        self.object_data = "string data"
        self.dd_cmd = f"dd if=/dev/urandom of=/mnt/{self.filename} bs=5M count=1"

        self.sanity_helpers = Sanity()
        # Create project, pvc, dc pods
        self.dc_pods = []
        self.dc_pods.append(
            dc_pod_factory(
                interface=constants.CEPHBLOCKPOOL,
            )
        )
        self.dc_pods.append(
            dc_pod_factory(
                interface=constants.CEPHFILESYSTEM,
                access_mode=constants.ACCESS_MODE_RWX,
            )
        )
        self.md5sum = []
        for pod_obj in self.dc_pods:
            pod_obj.exec_cmd_on_pod(command=self.dd_cmd)
            # Calculate md5sum
            self.md5sum.append(pod.cal_md5sum(pod_obj, self.filename))
        logger.info(f"Md5sum calculated before recovery: {self.md5sum}")

        self.bucket_name = bucket_factory(interface="OC")[0].name
        logger.info(f"Putting object on: {self.bucket_name}")
        assert bucket_utils.s3_put_object(
            s3_obj=mcg_obj,
            bucketname=self.bucket_name,
            object_key=self.object_key,
            data=self.object_data,
        ), "Failed: PutObject"
Code example #8
    def test_mcg_namespace_object_versions_crd(self, mcg_obj, cld_mgr,
                                               bucket_factory,
                                               bucketclass_dict):
        """
        Test object versioning S3 operations on namespace buckets/resources (CRDs).
        Validates put, get and delete object version operations

        """
        obj_versions = []
        version_key = "ObjKey-" + str(uuid.uuid4().hex)
        total_versions = 10
        aws_s3_resource = boto3.resource(
            "s3",
            endpoint_url=constants.MCG_NS_AWS_ENDPOINT,
            aws_access_key_id=cld_mgr.aws_client.access_key,
            aws_secret_access_key=cld_mgr.aws_client.secret_key,
        )

        ns_buc = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]
        ns_bucket = ns_buc.name
        namespace_res = ns_buc.bucketclass.namespacestores[0].uls_name
        aws_s3_client = aws_s3_resource.meta.client

        # Put, Get bucket versioning and verify
        logger.info(
            f"Enabling bucket versioning on resource bucket: {namespace_res}")
        assert bucket_utils.s3_put_bucket_versioning(
            s3_obj=mcg_obj,
            bucketname=namespace_res,
            status="Enabled",
            s3_client=aws_s3_client,
        ), "Failed: PutBucketVersioning"
        get_ver_res = bucket_utils.s3_get_bucket_versioning(
            s3_obj=mcg_obj, bucketname=namespace_res, s3_client=aws_s3_client)
        logger.info(
            f"Get and verify versioning on resource bucket: {namespace_res}")
        assert get_ver_res[
            "Status"] == "Enabled", "Versioning is not enabled on bucket"

        # Put, List, Get, Delete object version operations
        for i in range(1, total_versions):
            logger.info(f"Writing version {i} of {version_key}")
            obj = bucket_utils.s3_put_object(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                object_key=version_key,
                data=OBJ_DATA,
            )
            obj_versions.append(obj["VersionId"])
        list_ver_resp = bucket_utils.s3_list_object_versions(
            s3_obj=mcg_obj, bucketname=ns_bucket)
        get_list_and_verify(list_ver_resp, obj_versions, "Versions")

        for ver in obj_versions:
            assert bucket_utils.s3_get_object(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                object_key=version_key,
                versionid=ver,
            ), f"Failed to Read object {ver}"
            assert bucket_utils.s3_delete_object(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                object_key=version_key,
                versionid=ver,
            ), f"Failed to Delete object with {ver}"
            logger.info(f"Get and delete version: {ver} of {namespace_res}")

        logger.info(f"Suspending versioning on: {namespace_res}")
        assert bucket_utils.s3_put_bucket_versioning(
            s3_obj=mcg_obj,
            bucketname=namespace_res,
            status="Suspended",
            s3_client=aws_s3_client,
        ), "Failed: PutBucketVersioning"
        logger.info(f"Verifying versioning is suspended on: {namespace_res}")
        get_version_response = bucket_utils.s3_get_bucket_versioning(
            s3_obj=mcg_obj, bucketname=namespace_res, s3_client=aws_s3_client)
        assert (get_version_response["Status"] == "Suspended"
                ), "Versioning is not suspended on bucket"
Code example #9
    def test_public_website(self, mcg_obj, bucket_factory):
        """
        Tests public bucket website access
        """
        # Creating an S3 bucket to host the website
        s3_bucket = bucket_factory(amount=1, interface="S3")

        # Creating random S3 users
        users = []
        account1 = "noobaa-user1" + str(uuid.uuid4().hex)
        account2 = "noobaa-user2" + str(uuid.uuid4().hex)
        for account in account1, account2:
            users.append(
                NoobaaAccount(
                    mcg=mcg_obj,
                    name=account,
                    email=f"{account}@mail.com",
                    buckets=[s3_bucket[0].name],
                )
            )

        logger.info(f"Adding bucket website config to: {s3_bucket[0].name}")
        assert s3_put_bucket_website(
            s3_obj=mcg_obj,
            bucketname=s3_bucket[0].name,
            website_config=website_config,
        ), "Failed: PutBucketWebsite"
        logger.info(f"Getting bucket website config from: {s3_bucket[0].name}")
        assert s3_get_bucket_website(
            s3_obj=mcg_obj, bucketname=s3_bucket[0].name
        ), "Failed: GetBucketWebsite"

        logger.info("Writing index and error data to the bucket")
        assert s3_put_object(
            s3_obj=mcg_obj,
            bucketname=s3_bucket[0].name,
            object_key="index.html",
            data=index,
            content_type="text/html",
        ), "Failed: PutObject"
        assert s3_put_object(
            s3_obj=mcg_obj,
            bucketname=s3_bucket[0].name,
            object_key="error.html",
            data=error,
            content_type="text/html",
        ), "Failed: PutObject"

        # Setting Get(read) policy action for all users(public)
        bucket_policy_generated = gen_bucket_policy(
            sid="PublicRead",
            user_list=["*"],
            actions_list=["GetObject"],
            resources_list=[f"{s3_bucket[0].name}/{'*'}"],
            effect="Allow",
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {s3_bucket[0].name} with public access"
        )
        assert put_bucket_policy(
            mcg_obj, s3_bucket[0].name, bucket_policy
        ), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f"Getting bucket policy for bucket: {s3_bucket[0].name}")
        get_policy = get_bucket_policy(mcg_obj, s3_bucket[0].name)
        logger.info(f"Bucket policy: {get_policy['Policy']}")

        # Verifying GetObject by reading the website index with the created users (public read)
        for user in users:
            logger.info(
                f"Getting object using user: {user.email_id} on bucket: {s3_bucket[0].name} "
            )
            assert s3_get_object(
                user, s3_bucket[0].name, "index.html"
            ), f"Failed: Get Object by user {user.email_id}"
Code example #10
    def test_mcg_namespace_basic_s3_ops_crd(self, mcg_obj, cld_mgr,
                                            bucket_factory, bucketclass_dict):
        """
        Test basic S3 operations on namespace buckets.

        1. Validates put, get, copy, head, get_acl and delete object operations
        2. Validates ListObjects v1 and v2 with prefix/delimiter combinations and paged entries

        """
        max_keys = 50

        ns_buc = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]
        ns_bucket = ns_buc.name

        # Put, Get, Copy, Head, Get Acl and Delete object operations
        logger.info(f"Put and Get object operation on {ns_bucket}")
        assert bucket_utils.s3_put_object(s3_obj=mcg_obj,
                                          bucketname=ns_bucket,
                                          object_key=ROOT_OBJ,
                                          data=OBJ_DATA), "Failed: PutObject"
        get_res = bucket_utils.s3_get_object(s3_obj=mcg_obj,
                                             bucketname=ns_bucket,
                                             object_key=ROOT_OBJ)

        list_response = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                        bucketname=ns_bucket)
        get_list_and_verify(list_response, [ROOT_OBJ], "Contents")

        assert bucket_utils.s3_copy_object(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            source=f"/{ns_bucket}/{ROOT_OBJ}",
            object_key=COPY_OBJ,
        ), "Failed: CopyObject"
        get_copy_res = bucket_utils.s3_get_object(s3_obj=mcg_obj,
                                                  bucketname=ns_bucket,
                                                  object_key=COPY_OBJ)
        logger.info(f"Verifying Etag of {COPY_OBJ} from Get object operations")
        assert get_copy_res["ETag"] == get_res["ETag"], "Incorrect object key"

        assert bucket_utils.s3_head_object(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=ROOT_OBJ,
            if_match=get_res["ETag"],
        ), "ETag does not match with the head object"

        get_acl_res = bucket_utils.s3_get_object_acl(s3_obj=mcg_obj,
                                                     bucketname=ns_bucket,
                                                     object_key=ROOT_OBJ)
        logger.info(
            f"Verifying Get object ACl response: {get_acl_res['Grants']}")
        assert (get_acl_res["Grants"][0]["Grantee"]["ID"] ==
                get_acl_res["Owner"]["ID"]), "Invalid Grant ID"

        logger.info(
            f"Deleting {ROOT_OBJ} and {COPY_OBJ} and verifying response")
        del_res = bucket_utils.s3_delete_objects(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_keys=[{
                "Key": f"{ROOT_OBJ}"
            }, {
                "Key": f"{COPY_OBJ}"
            }],
        )
        for i, key in enumerate([ROOT_OBJ, COPY_OBJ]):
            assert (key == del_res["Deleted"][i]["Key"]
                    ), "Object key not found/not-deleted"

        logger.info("Setting up objects to verify list operations")
        obj_keys, obj_prefixes, mid_index = setup_objects_to_list(
            amount=100,
            prefix="Drive/Folder",
            bucket_name=ns_bucket,
            mcg_obj=mcg_obj,
        )

        # List v1 and page entries
        logger.info(f"ListObjectsV1 operation on {ns_bucket}")
        list_v1_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                      bucketname=ns_bucket)
        get_list_and_verify(list_v1_res, obj_keys, "Contents", version="v1")
        logger.info(
            "Get and verify next page entries of list using ListObjectV1")
        first_page_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                         bucketname=ns_bucket,
                                                         max_keys=max_keys)
        last_key = get_list_and_verify(first_page_res,
                                       obj_keys[:mid_index],
                                       "Contents",
                                       version="v1")
        next_page_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                        bucketname=ns_bucket,
                                                        max_keys=max_keys,
                                                        marker=last_key)
        get_list_and_verify(next_page_res,
                            obj_keys[mid_index:],
                            "Contents",
                            version="v1")

        # List v1 with prefix and page entries
        logger.info(f"ListObjectsV1 operation on {ns_bucket} with prefix")
        list_v1_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                      bucketname=ns_bucket,
                                                      prefix="Drive/")
        get_list_and_verify(list_v1_res,
                            obj_keys,
                            "Contents",
                            "Drive/",
                            version="v1")
        logger.info(
            "Get and verify next page entries of list using ListObjectV1 with prefix"
        )
        first_page_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                         bucketname=ns_bucket,
                                                         prefix="Drive/",
                                                         max_keys=max_keys)
        last_key = get_list_and_verify(first_page_res,
                                       obj_keys[:mid_index],
                                       "Contents",
                                       "Drive/",
                                       version="v1")
        next_page_res = bucket_utils.s3_list_objects_v1(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            prefix="Drive/",
            max_keys=max_keys,
            marker=last_key,
        )
        get_list_and_verify(next_page_res,
                            obj_keys[mid_index:],
                            "Contents",
                            "Drive/",
                            version="v1")

        # List v1 with prefix, delimiter and page entries
        logger.info(
            f"ListObjectsV1 operation on {ns_bucket} with prefix and delimiter"
        )
        list_v1_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                      bucketname=ns_bucket,
                                                      prefix="Drive/",
                                                      delimiter="/")
        get_list_and_verify(list_v1_res,
                            obj_prefixes,
                            "CommonPrefixes",
                            "Drive/",
                            "/",
                            version="v1")
        logger.info(
            "Get and verify next page entries of list using ListObjectV1 with prefix and delimiter"
        )
        first_page_res = bucket_utils.s3_list_objects_v1(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            prefix="Drive/",
            delimiter="/",
            max_keys=max_keys,
        )
        get_list_and_verify(
            first_page_res,
            obj_prefixes[:mid_index],
            "CommonPrefixes",
            "Drive/",
            "/",
            version="v1",
        )
        next_page_res = bucket_utils.s3_list_objects_v1(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            prefix="Drive/",
            delimiter="/",
            max_keys=max_keys,
            marker=first_page_res["NextMarker"],
        )
        get_list_and_verify(
            next_page_res,
            obj_prefixes[mid_index:],
            "CommonPrefixes",
            "Drive/",
            "/",
            version="v1",
        )

        # List v2
        logger.info(f"ListObjectsV2 operation on {ns_bucket}")
        list_v2_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                      bucketname=ns_bucket)
        get_list_and_verify(list_v2_res, obj_keys, "Contents", version="v2")
        logger.info(
            "Get and verify next page entries of list using ListObjectV2")
        first_page_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                         bucketname=ns_bucket,
                                                         max_keys=max_keys)
        get_list_and_verify(first_page_res, obj_keys, "Contents", version="v2")
        next_page_res = bucket_utils.s3_list_objects_v2(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            max_keys=max_keys,
            con_token=first_page_res["NextContinuationToken"],
        )
        get_list_and_verify(next_page_res,
                            obj_keys[mid_index:],
                            "Contents",
                            version="v2")

        # List v2 with prefix
        logger.info(f"ListObjectsV2 operation on {ns_bucket} with prefix")
        list_v2_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                      bucketname=ns_bucket,
                                                      prefix="Drive/")
        get_list_and_verify(list_v2_res,
                            obj_keys,
                            "Contents",
                            "Drive/",
                            version="v2")
        logger.info(
            "Get and verify next page entries of list using ListObjectV2 with prefix"
        )
        first_page_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                         bucketname=ns_bucket,
                                                         prefix="Drive/",
                                                         max_keys=max_keys)
        get_list_and_verify(first_page_res,
                            obj_keys[:mid_index],
                            "Contents",
                            "Drive/",
                            version="v2")
        next_page_res = bucket_utils.s3_list_objects_v2(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            prefix="Drive/",
            max_keys=max_keys,
            con_token=first_page_res["NextContinuationToken"],
        )
        get_list_and_verify(next_page_res,
                            obj_keys[mid_index:],
                            "Contents",
                            "Drive/",
                            version="v2")

        # List v2 with prefix and delimiter
        logger.info(
            f"ListObjectsV2 operation on {ns_bucket} with prefix and delimiter"
        )
        list_v2_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                      bucketname=ns_bucket,
                                                      prefix="Drive/",
                                                      delimiter="/")
        get_list_and_verify(list_v2_res,
                            obj_prefixes,
                            "CommonPrefixes",
                            "Drive/",
                            "/",
                            version="v2")
        logger.info(
            "Get and verify next page entries of ListObjectV2 with prefix and delimiter"
        )
        first_page_res = bucket_utils.s3_list_objects_v2(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            prefix="Drive/",
            delimiter="/",
            max_keys=max_keys,
        )
        get_list_and_verify(
            first_page_res,
            obj_prefixes[:mid_index],
            "CommonPrefixes",
            "Drive/",
            "/",
            version="v2",
        )
        next_page_res = bucket_utils.s3_list_objects_v2(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            prefix="Drive/",
            delimiter="/",
            max_keys=max_keys,
            con_token=first_page_res["NextContinuationToken"],
        )
        get_list_and_verify(
            next_page_res,
            obj_prefixes[mid_index:],
            "CommonPrefixes",
            "Drive/",
            "/",
            version="v2",
        )
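
The manual marker and continuation-token plumbing above is exactly what boto3 paginators automate; a sketch of the equivalent V2 listing, again assuming the same s3_client and an illustrative bucket name:

# ListObjectsV2 pagination without hand-rolled token handling.
paginator = s3_client.get_paginator("list_objects_v2")
pages = paginator.paginate(
    Bucket="my-bucket",
    Prefix="Drive/",
    Delimiter="/",
    PaginationConfig={"PageSize": 50},
)
for page in pages:
    # With a delimiter, grouped keys are returned under CommonPrefixes.
    for common_prefix in page.get("CommonPrefixes", []):
        print(common_prefix["Prefix"])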
Code example #11
    def test_mcg_namespace_lifecycle_crd(
        self,
        mcg_obj,
        cld_mgr,
        awscli_pod,
        bucket_factory,
        test_directory_setup,
        bucketclass_dict,
    ):
        """
        Test MCG namespace resource/bucket lifecycle using CRDs

        1. Create namespace resources with CRDs
        2. Create namespace bucket with CRDs
        3. Set bucket policy on namespace bucket with an S3 user principal
        4. Verify bucket policy
        5. Read/write directly on namespace resource target.
        6. Edit the namespace bucket
        7. Delete namespace resource and bucket

        """
        data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        if (constants.RGW_PLATFORM in bucketclass_dict["namespace_policy_dict"]
            ["namespacestore_dict"]):
            s3_creds = {
                "access_key_id": cld_mgr.rgw_client.access_key,
                "access_key": cld_mgr.rgw_client.secret_key,
                "endpoint": cld_mgr.rgw_client.endpoint,
            }
        else:
            s3_creds = {
                "access_key_id": cld_mgr.aws_client.access_key,
                "access_key": cld_mgr.aws_client.secret_key,
                "endpoint": constants.MCG_NS_AWS_ENDPOINT,
                "region": config.ENV_DATA["region"],
            }

        # Noobaa s3 account details
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        # Create the namespace resource and bucket
        ns_bucket = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]
        aws_target_bucket = ns_bucket.bucketclass.namespacestores[0].uls_name
        logger.info(f"Namespace bucket: {ns_bucket.name} created")

        # Noobaa S3 account
        user = NoobaaAccount(mcg_obj,
                             name=user_name,
                             email=email,
                             buckets=[ns_bucket.name])
        logger.info(f"Noobaa account: {user.email_id} with S3 access created")

        bucket_policy_generated = gen_bucket_policy(
            user_list=[user.email_id],
            actions_list=["DeleteObject"],
            effect="Deny",
            resources_list=[f'{ns_bucket.name}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)
        logger.info(
            f"Creating bucket policy on bucket: {ns_bucket.name} with wildcard (*) Principal"
        )
        put_policy = put_bucket_policy(mcg_obj, ns_bucket.name, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        # Getting Policy
        logger.info(f"Getting bucket policy on bucket: {ns_bucket.name}")
        get_policy = get_bucket_policy(mcg_obj, ns_bucket.name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # MCG admin writes an object to bucket
        logger.info(f"Writing object on bucket: {ns_bucket.name} by admin")
        assert s3_put_object(mcg_obj, ns_bucket.name, object_key,
                             data), "Failed: PutObject"

        # Verifying whether Get & Put object is allowed to S3 user
        logger.info(f"Get object action on namespace bucket: {ns_bucket.name}"
                    f" with user: {user.email_id}")
        assert s3_get_object(user, ns_bucket.name,
                             object_key), "Failed: GetObject"
        logger.info(f"Put object action on namespace bucket: {ns_bucket.name}"
                    f" with user: {user.email_id}")
        assert s3_put_object(user, ns_bucket.name, object_key,
                             data), "Failed: PutObject"

        # Verifying whether Delete object action is denied
        logger.info(f"Verifying whether user: {user.email_id} "
                    f"is denied to Delete object after updating policy")
        try:
            s3_delete_object(user, ns_bucket.name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Delete object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code "
                    f"{response.error['Code']}")
        else:
            assert (
                False
            ), "Delete object operation was granted access, when it should have denied"

        logger.info(
            "Setting up test files for upload, to the bucket/resources")
        setup_base_objects(awscli_pod,
                           test_directory_setup.origin_dir,
                           amount=3)

        # Upload files directly to NS resources
        logger.info(
            f"Uploading objects directly to ns resource target: {aws_target_bucket}"
        )
        sync_object_directory(
            awscli_pod,
            src=test_directory_setup.origin_dir,
            target=f"s3://{aws_target_bucket}",
            signed_request_creds=s3_creds,
        )

        # Read files directly from NS resources
        logger.info(
            f"Downloading objects directly from ns resource target: {aws_target_bucket}"
        )
        sync_object_directory(
            awscli_pod,
            src=f"s3://{aws_target_bucket}",
            target=test_directory_setup.result_dir,
            signed_request_creds=s3_creds,
        )

        # Edit namespace bucket
        logger.info(f"Editing the namespace resource bucket: {ns_bucket.name}")
        namespace_bucket_update(
            mcg_obj,
            bucket_name=ns_bucket.name,
            read_resource=[aws_target_bucket],
            write_resource=aws_target_bucket,
        )

        # Verify Download after editing bucket
        logger.info(
            f"Downloading objects directly from ns bucket target: {ns_bucket.name}"
        )
        sync_object_directory(
            awscli_pod,
            src=f"s3://{ns_bucket.name}",
            target=test_directory_setup.result_dir,
            s3_obj=mcg_obj,
        )

        # MCG namespace bucket delete
        logger.info(
            f"Deleting all objects on namespace resource bucket: {ns_bucket.name}"
        )
        rm_object_recursive(awscli_pod, ns_bucket.name, mcg_obj)

        # Namespace resource delete
        logger.info(f"Deleting the resource: {aws_target_bucket}")
        mcg_obj.delete_ns_resource(ns_resource_name=aws_target_bucket)
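
sync_object_directory presumably shells out to the AWS CLI inside the awscli pod; conceptually, the direct upload to the namespace resource target reduces to something like the following (paths, endpoint and credentials are all illustrative):

import os
import subprocess

# Hypothetical stand-in for the pod-exec'd "aws s3 sync" call.
subprocess.run(
    [
        "aws", "s3", "sync",
        "/origin-dir",                        # assumed local directory
        "s3://aws-target-bucket",             # assumed target bucket
        "--endpoint-url", "https://s3.amazonaws.com",
    ],
    env={
        **os.environ,
        "AWS_ACCESS_KEY_ID": "ACCESS_KEY",        # assumed credentials
        "AWS_SECRET_ACCESS_KEY": "SECRET_KEY",
    },
    check=True,
)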
Code example #12
    def test_mcg_cache_lifecycle(
        self,
        mcg_obj,
        cld_mgr,
        awscli_pod,
        bucket_factory,
        test_directory_setup,
        bucketclass_dict,
    ):
        """
        Test MCG cache bucket lifecycle

        1. Create cache buckets on each namespace store (RGW-OBC/OBC)
        2. Verify write operations on the cache and hub buckets
        3. Verify read/list operations on the cache bucket and hub target
        4. Verify delete operations on the buckets
        5. Delete multiple cache buckets with data still in the ns store
        6. Recreate the cache buckets on the ns store (with existing data), then read

        """
        data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        if (constants.RGW_PLATFORM in bucketclass_dict["namespace_policy_dict"]
            ["namespacestore_dict"]):
            s3_creds = {
                "access_key_id": cld_mgr.rgw_client.access_key,
                "access_key": cld_mgr.rgw_client.secret_key,
                "endpoint": cld_mgr.rgw_client.endpoint,
            }
            logger.info("RGW obc will be created as cache bucket")
            obc_interface = "rgw-oc"
        else:
            s3_creds = {
                "access_key_id": cld_mgr.aws_client.access_key,
                "access_key": cld_mgr.aws_client.secret_key,
                "endpoint": constants.MCG_NS_AWS_ENDPOINT,
                "region": config.ENV_DATA["region"],
            }
            logger.info("Noobaa obc will be created as cache bucket")
            obc_interface = bucketclass_dict["interface"]

        # Create the namespace resource and bucket
        ns_bucket = bucket_factory(
            interface=obc_interface,
            bucketclass=bucketclass_dict,
        )[0]
        logger.info(f"Cache bucket: {ns_bucket.name} created")
        target_bucket = ns_bucket.bucketclass.namespacestores[0].uls_name

        # Write to cache
        logger.info(f"Writing object on cache bucket: {ns_bucket.name}")
        assert s3_put_object(mcg_obj, ns_bucket.name, object_key,
                             data), "Failed: PutObject"
        wait_for_cache(mcg_obj, ns_bucket.name, [object_key])

        # Write to hub and read from cache
        logger.info("Setting up test files for upload")
        setup_base_objects(awscli_pod,
                           test_directory_setup.origin_dir,
                           amount=3)
        logger.info(f"Uploading objects to ns target: {target_bucket}")
        sync_object_directory(
            awscli_pod,
            src=test_directory_setup.origin_dir,
            target=f"s3://{target_bucket}",
            signed_request_creds=s3_creds,
        )
        sync_object_directory(
            awscli_pod,
            f"s3://{ns_bucket.name}",
            test_directory_setup.result_dir,
            mcg_obj,
        )

        # Read cached object
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"

        # Read stale object (TTL expired); the ttl value is in milliseconds
        sleep(bucketclass_dict["namespace_policy_dict"]["ttl"] / 1000)
        logger.info(f"Get object on cache bucket: {ns_bucket.name}")
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"

        # List on cache bucket
        list_response = s3_list_objects_v1(s3_obj=mcg_obj,
                                           bucketname=ns_bucket.name)
        logger.info(f"Listed objects: {list_response}")

        # Delete object from cache bucket
        s3_delete_object(mcg_obj, ns_bucket.name, object_key)
        sleep(5)
        # Try to read the deleted object; the GET should now fail
        try:
            s3_get_object(mcg_obj, ns_bucket.name, object_key)
        except boto3exception.ClientError:
            logger.info("Object deleted successfully")
        else:
            assert False, "Deleted object is still readable from the cache bucket"

        # Validate deletion on the hub
        if (constants.RGW_PLATFORM in bucketclass_dict["namespace_policy_dict"]
            ["namespacestore_dict"]):
            obj_list = list(
                cld_mgr.rgw_client.client.Bucket(target_bucket).objects.all())
        else:
            obj_list = list(
                cld_mgr.aws_client.client.Bucket(target_bucket).objects.all())
        # objects.all() yields ObjectSummary instances, so compare by key
        if object_key in [obj.key for obj in obj_list]:
            raise UnexpectedBehaviour(
                "Object was not deleted from the hub properly")

        # Recreate and validate object
        assert s3_put_object(mcg_obj, ns_bucket.name, object_key,
                             data), "Failed: PutObject"
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"

        logger.info(f"Deleting cache bucket {ns_bucket.name}")
        curr_ns_store = ns_bucket.bucketclass.namespacestores[0]
        ns_bucket.delete()
        new_bucket_class = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Cache",
                "ttl": 180000,
                "namespacestores": [curr_ns_store],
            },
            "placement_policy": {
                "tiers": [{
                    "backingStores": [constants.DEFAULT_NOOBAA_BACKINGSTORE]
                }]
            },
        }
        logger.info(
            f"Recreating cache bucket {ns_bucket.name} using current hub: {target_bucket}"
        )
        ns_bucket = bucket_factory(
            interface=obc_interface,
            bucketclass=new_bucket_class,
        )[0]
        logger.info(
            f"Read existing data on hub: {target_bucket} through cache bucket: {ns_bucket.name}"
        )
        assert s3_get_object(mcg_obj, ns_bucket.name,
                             object_key), "Failed: GetObject"
Code example #13
    def test_all_worker_nodes_short_network_failure(self, nodes, setup,
                                                    mcg_obj, bucket_factory,
                                                    node_restart_teardown):
        """
        OCS-1432/OCS-1433:
        - Start DeploymentConfig based app pods
        - Make all the worker nodes unresponsive by causing an abrupt network failure
        - Reboot the unresponsive nodes after a short duration of ~300 seconds
        - When the unresponsive nodes recover, app pods and the Ceph cluster should recover
        - Again run IOs from app pods
        - Create OBC and read/write objects
        """
        pod_objs = setup
        worker_nodes = node.get_worker_nodes()

        # Run IO on pods
        logger.info(f"Starting IO on {len(pod_objs)} app pods")
        with ThreadPoolExecutor() as executor:
            for pod_obj in pod_objs:
                logger.info(f"Starting IO on pod {pod_obj.name}")
                storage_type = ("block" if pod_obj.pvc.get_pvc_vol_mode
                                == "Block" else "fs")
                executor.submit(
                    pod_obj.run_io,
                    storage_type=storage_type,
                    size="2G",
                    runtime=30,
                    fio_filename=f"{pod_obj.name}_io_f1",
                )

        logger.info(f"IO started on all {len(pod_objs)} app pods")

        # Wait for IO results
        for pod_obj in pod_objs:
            pod.get_fio_rw_iops(pod_obj)

        # Induce network failure on all worker nodes
        with ThreadPoolExecutor() as executor:
            for node_name in worker_nodes:
                executor.submit(node.node_network_failure, node_name, False)

        node.wait_for_nodes_status(node_names=worker_nodes,
                                   status=constants.NODE_NOT_READY)

        logger.info(f"Waiting for {self.short_nw_fail_time} seconds")
        sleep(self.short_nw_fail_time)

        # Reboot the worker nodes
        logger.info(f"Stop and start the worker nodes: {worker_nodes}")
        nodes.restart_nodes_by_stop_and_start(node.get_node_objs(worker_nodes))

        try:
            node.wait_for_nodes_status(node_names=worker_nodes,
                                       status=constants.NODE_READY)
            logger.info("Wait for OCS pods to be in running state")
            if not pod.wait_for_pods_to_be_running(timeout=720):
                raise ResourceWrongStatusException(
                    "Pods are not in running state")
        except ResourceWrongStatusException:
            # Restart nodes
            nodes.restart_nodes(node.get_node_objs(worker_nodes))

        ceph_health_check(tries=80)

        # Get current info of app pods
        new_pod_objs = list()
        for pod_obj in pod_objs:
            pod_label = pod_obj.labels.get("deploymentconfig")
            pods_data = pod.get_pods_having_label(
                f"deploymentconfig={pod_label}", pod_obj.namespace)
            current_pods = [
                pod_data.get("metadata").get("name") for pod_data in pods_data
                if "-deploy" not in pod_data.get("metadata").get("name")
            ]
            logger.info(f"Pods with label {pod_label}: {current_pods}")

            # Remove the older pod from the list if pod is rescheduled
            if len(current_pods) > 1:
                current_pods.remove(pod_obj.name)

            new_pod_obj = pod.get_pod_obj(current_pods.pop(),
                                          pod_obj.namespace)
            new_pod_obj.pvc = pod_obj.pvc
            new_pod_objs.append(new_pod_obj)

        logger.info("Wait for app pods are in running state")
        for pod_obj in new_pod_objs:
            pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                resource_name=pod_obj.name,
                timeout=720,
                sleep=20,
            )
        logger.info("All the app pods reached running state")

        # Run more IOs on app pods
        with ThreadPoolExecutor() as executor:
            for pod_obj in new_pod_objs:
                logger.info(f"Starting IO on pod {pod_obj.name}")
                pod_obj.wl_setup_done = False
                storage_type = ("block" if pod_obj.pvc.get_pvc_vol_mode
                                == "Block" else "fs")
                executor.submit(
                    pod_obj.run_io,
                    storage_type=storage_type,
                    size="1G",
                    runtime=30,
                    fio_filename=f"{pod_obj.name}_io_f2",
                )

        for pod_obj in new_pod_objs:
            pod.get_fio_rw_iops(pod_obj)

        bucket_name = bucket_factory(interface="OC")[0].name
        logger.info(f"Created new bucket {bucket_name}")
        assert s3_put_object(
            s3_obj=mcg_obj,
            bucketname=bucket_name,
            object_key="test-obj",
            data="string data",
        ), "Failed: Put object"
        assert s3_get_object(s3_obj=mcg_obj,
                             bucketname=bucket_name,
                             object_key="test-obj"), "Failed: Get object"
Code example #14
    def test_bucket_versioning_and_policies(self, mcg_obj, bucket_factory):
        """
        Tests bucket and object versioning on Noobaa buckets and their related actions
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        object_versions = []

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin sets a policy on OBC bucket to allow versioning related actions
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=bucket_version_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ])
        bucket_policy = json.dumps(bucket_policy_generated)

        # Creating policy
        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f'Enabling bucket versioning on {obc_obj.bucket_name} using User: {obc_obj.obc_account}'
        )
        assert s3_put_bucket_versioning(
            s3_obj=obc_obj, bucketname=obc_obj.bucket_name,
            status="Enabled"), "Failed: PutBucketVersioning"

        logger.info(
            f'Verifying whether versioning is enabled on bucket: {obc_obj.bucket_name}'
        )
        assert s3_get_bucket_versioning(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name), "Failed: GetBucketVersioning"

        # Admin modifies the policy to allow the obc-account to write/read/delete versioned objects
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=object_version_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ])
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy for bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        for key in range(5):
            logger.info(f"Writing {key} version of {object_key}")
            obj = s3_put_object(s3_obj=obc_obj,
                                bucketname=obc_obj.bucket_name,
                                object_key=object_key,
                                data=data)
            object_versions.append(obj['VersionId'])

        for version in object_versions:
            logger.info(f"Reading version: {version} of {object_key}")
            assert s3_get_object(
                s3_obj=obc_obj,
                bucketname=obc_obj.bucket_name,
                object_key=object_key,
                versionid=version), f"Failed: To Read object {version}"
            logger.info(f"Deleting version: {version} of {object_key}")
            assert s3_delete_object(
                s3_obj=obc_obj,
                bucketname=obc_obj.bucket_name,
                object_key=object_key,
                versionid=version), f"Failed: To Delete object with {version}"

        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['PutBucketVersioning'],
            resources_list=[obc_obj.bucket_name])
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} by Admin'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f"Suspending bucket versioning on {obc_obj.bucket_name} using User: {obc_obj.obc_account}"
        )
        assert s3_put_bucket_versioning(
            s3_obj=obc_obj, bucketname=obc_obj.bucket_name,
            status="Suspended"), "Failed: PutBucketVersioning"

        # Verifying whether GetBucketVersion action is denied access
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to GetBucketVersion'
        )
        try:
            s3_get_bucket_versioning(s3_obj=obc_obj,
                                     bucketname=obc_obj.bucket_name)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('GetBucketVersioning action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
Code example #15
    def test_monitor_recovery(
        self,
        dc_pod_factory,
        mcg_obj,
        bucket_factory,
    ):
        """
        Verifies Monitor recovery procedure as per:
        https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.8/html/troubleshooting_openshift_container_storage/restoring-the-monitor-pods-in-openshift-container-storage_rhocs

        """
        # Initialize mon recovery class
        mon_recovery = MonitorRecovery()

        logger.info("Corrupting ceph monitors by deleting store.db")
        corrupt_ceph_monitors()

        logger.info("Backing up all the deployments")
        mon_recovery.backup_deployments()
        dep_revert, mds_revert = mon_recovery.deployments_to_revert()

        logger.info("Starting the monitor recovery procedure")
        logger.info("Scaling down rook and ocs operators")
        mon_recovery.scale_rook_ocs_operators(replica=0)

        logger.info(
            "Preparing script and patching OSDs to remove LivenessProbe and sleep to infinity"
        )
        mon_recovery.prepare_monstore_script()
        mon_recovery.patch_sleep_on_osds()
        switch_to_project(constants.OPENSHIFT_STORAGE_NAMESPACE)

        logger.info("Getting mon-store from OSDs")
        mon_recovery.run_mon_store()

        logger.info("Patching MONs to sleep infinitely")
        mon_recovery.patch_sleep_on_mon()

        logger.info("Updating initial delay on all monitors")
        update_mon_initial_delay()

        logger.info("Generating monitor map command using the IPs")
        mon_map_cmd = generate_monmap_cmd()

        logger.info("Getting ceph keyring from ocs secrets")
        mon_recovery.get_ceph_keyrings()

        logger.info("Rebuilding Monitors to recover store db")
        mon_recovery.monitor_rebuild(mon_map_cmd)

        logger.info("Reverting mon, osd and mgr deployments")
        mon_recovery.revert_patches(dep_revert)

        logger.info("Scaling back rook and ocs operators")
        mon_recovery.scale_rook_ocs_operators(replica=1)

        logger.info("Recovering CephFS")
        mon_recovery.scale_rook_ocs_operators(replica=0)
        logger.info(
            "Patching MDSs to remove LivenessProbe and setting sleep to infinity"
        )
        mon_recovery.patch_sleep_on_mds()
        logger.info("Resetting the fs")
        ceph_fs_recovery()
        logger.info("Reverting MDS deployments")
        mon_recovery.revert_patches(mds_revert)
        logger.info("Scaling back rook and ocs operators")
        mon_recovery.scale_rook_ocs_operators(replica=1)
        logger.info("Recovering mcg by re-spinning the pods")
        recover_mcg()
        remove_global_id_reclaim()
        for pod_obj in self.dc_pods:
            pod_obj.delete(force=True)
        new_md5_sum = []
        logger.info("Verifying md5sum of files after recovery")
        for pod_obj in get_spun_dc_pods(self.dc_pods):
            pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                resource_name=pod_obj.name,
                timeout=600,
                sleep=10,
            )
            new_md5_sum.append(pod.cal_md5sum(pod_obj, self.filename))
        logger.info(f"Md5sum calculated after recovery: {new_md5_sum}")
        if collections.Counter(new_md5_sum) == collections.Counter(self.md5sum):
            logger.info(
                f"Verified: md5sum of {self.filename} on pods matches with the original md5sum"
            )
        else:
            assert False, f"Data corruption found {new_md5_sum} and {self.md5sum}"
        logger.info("Getting object after recovery")
        assert bucket_utils.s3_get_object(
            s3_obj=mcg_obj,
            bucketname=self.bucket_name,
            object_key=self.object_key,
        ), "Failed: GetObject"

        # New pvc, dc pods, obcs
        new_dc_pods = [
            dc_pod_factory(
                interface=constants.CEPHBLOCKPOOL,
            ),
            dc_pod_factory(
                interface=constants.CEPHFILESYSTEM,
            ),
        ]
        for pod_obj in new_dc_pods:
            pod_obj.exec_cmd_on_pod(command=self.dd_cmd)
        logger.info("Creating new bucket and write object")
        new_bucket = bucket_factory(interface="OC")[0].name
        assert bucket_utils.s3_put_object(
            s3_obj=mcg_obj,
            bucketname=new_bucket,
            object_key=self.object_key,
            data=self.object_data,
        ), "Failed: PutObject"
        wait_for_storage_pods()
        logger.info("Archiving the ceph crash warnings")
        tool_pod = get_ceph_tools_pod()
        tool_pod.exec_ceph_cmd(ceph_cmd="ceph crash archive-all", format=None)
        self.sanity_helpers.health_check(tries=10)
Code example #16
    def test_object_actions(self, mcg_obj, bucket_factory):
        """
        Test to verify different object actions and cross-account access to buckets
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin sets policy on obc bucket with obc account principal
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['PutObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'])
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       bucket_policy)
        logger.info(f'Put bucket policy response from Admin: {put_policy}')

        # Get Policy
        logger.info(f'Getting Bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether obc account can put object
        logger.info(f'Adding object on bucket: {obc_obj.bucket_name}')
        assert s3_put_object(obc_obj, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        # Verifying whether Get action is not allowed
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Get object'
        )
        try:
            s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Get Object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Get object succeeded when it should have failed"

        # Verifying whether obc account allowed to create multipart
        logger.info(
            f'Creating multipart on bucket: {obc_obj.bucket_name} with key: {object_key}'
        )
        create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

        # Verifying whether obc account is denied access to delete object
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Delete object'
        )
        try:
            s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Delete action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Delete object succeeded when it should have failed"

        # Creating noobaa account to access bucket belonging to obc account
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"
        user = NoobaaAccount(mcg_obj,
                             name=user_name,
                             email=email,
                             buckets=[obc_obj.bucket_name])

        # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
        new_policy_generated = gen_bucket_policy(
            user_list=user.email_id,
            actions_list=['GetObject', 'DeleteObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'])
        new_policy = json.dumps(new_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                       new_policy)
        logger.info(f'Put bucket policy response from admin: {put_policy}')

        # Get Policy
        logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get, Delete object is allowed
        logger.info(
            f'Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
        )
        assert s3_get_object(user, obc_obj.bucket_name,
                             object_key), "Failed: Get Object"
        logger.info(
            f'Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
        )
        assert s3_delete_object(user, obc_obj.bucket_name,
                                object_key), "Failed: Delete Object"

        # Verifying whether Put object action is denied
        logger.info(
            f'Verifying whether user: {user.email_id} is denied to Put object after updating policy'
        )
        try:
            s3_put_object(user, obc_obj.bucket_name, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Put object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
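
The try/except blocks above repeat one pattern: attempt an S3 call and accept only an AccessDenied rejection. A hedged sketch of a reusable helper (assert_access_denied is a hypothetical name, not part of this test suite) built directly on botocore, which the boto3exception alias in these tests points at:

import botocore.exceptions


def assert_access_denied(s3_call, *args, **kwargs):
    """Run an S3 operation and fail unless it is rejected with AccessDenied."""
    try:
        s3_call(*args, **kwargs)
    except botocore.exceptions.ClientError as e:
        error_code = e.response["Error"]["Code"]
        assert (
            error_code == "AccessDenied"
        ), f"{e.response} received invalid error code {error_code}"
    else:
        assert False, f"{s3_call.__name__} succeeded when it should have been denied"

# Hypothetical usage against the helpers in this module:
# assert_access_denied(s3_get_object, obc_obj, obc_obj.bucket_name, object_key)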
Code example #17
    def test_object_actions(self, mcg_obj, bucket_factory):
        """
        Test to verify different object actions and cross account access to buckets
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface="OC")
        obc_obj = OBC(obc[0].name)

        # Creating noobaa account to access bucket belonging to obc account
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"
        user = NoobaaAccount(
            mcg_obj, name=user_name, email=email, buckets=[obc_obj.bucket_name]
        )

        # Admin sets policy on obc bucket with obc account principal
        bucket_policy_generated = gen_bucket_policy(
            user_list=[obc_obj.obc_account, user.email_id],
            actions_list=["PutObject"]
            if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
            else ["GetObject", "DeleteObject"],
            effect="Allow"
            if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
            else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether users can put object
        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {obc_obj.obc_account}"
        )
        assert s3_put_object(
            obc_obj, obc_obj.bucket_name, object_key, data
        ), "Failed: Put Object"

        logger.info(
            f"Adding object on bucket: {obc_obj.bucket_name} using user: {user.email_id}"
        )
        assert s3_put_object(
            user, obc_obj.bucket_name, object_key, data
        ), "Failed: Put Object"

        # Verifying whether Get action is not allowed
        logger.info(
            f"Verifying whether user: "******"ocs_version"]) >= 4.6 else obc_obj.obc_account}'
            f" is denied to Get object"
        )
        try:
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
                s3_get_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Get Object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Get object succeeded when it should have failed"

        if version.get_semantic_ocs_version_from_config() == version.VERSION_4_6:
            logger.info(
                f"Verifying whether the user: "******"{obc_obj.obc_account} is able to access Get action"
                f"irrespective of the policy set"
            )
            assert s3_get_object(
                obc_obj, obc_obj.bucket_name, object_key
            ), "Failed: Get Object"

        # Verifying whether obc account allowed to create multipart
        logger.info(
            f"Creating multipart on bucket: {obc_obj.bucket_name}"
            f" with key: {object_key} using user: {obc_obj.obc_account}"
        )
        create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

        # Verifying whether S3 user is allowed to create multipart
        logger.info(
            f"Creating multipart on bucket: {obc_obj.bucket_name} "
            f"with key: {object_key} using user: {user.email_id}"
        )
        create_multipart_upload(user, obc_obj.bucket_name, object_key)

        # Verifying whether obc account is denied access to delete object
        logger.info(
            f"Verifying whether user: "******"ocs_version"]) >= 4.6 else obc_obj.obc_account}'
            f"is denied to Delete object"
        )
        try:
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
                s3_delete_object(user, obc_obj.bucket_name, object_key)
            else:
                s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Delete action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Delete object succeeded when it should have failed"

        # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
        new_policy_generated = gen_bucket_policy(
            user_list=[user.email_id],
            actions_list=["GetObject", "DeleteObject"]
            if float(config.ENV_DATA["ocs_version"]) <= 4.6
            else ["PutObject"],
            effect="Allow"
            if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6
            else "Deny",
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        )
        new_policy = json.dumps(new_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
        )
        put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, new_policy)
        logger.info(f"Put bucket policy response from admin: {put_policy}")

        # Get Policy
        logger.info(f"Getting bucket policy on bucket: {obc_obj.bucket_name}")
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get, Delete object is allowed
        logger.info(
            f"Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for get_resp in TimeoutSampler(
            30, 4, s3_get_object, user, obc_obj.bucket_name, object_key
        ):
            if "403" not in str(get_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("GetObj operation successful")
                break
            else:
                logger.info("GetObj operation is denied access")
        logger.info(
            f"Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
        )
        for del_resp in TimeoutSampler(
            30, 4, s3_delete_object, user, obc_obj.bucket_name, object_key
        ):
            if "403" not in str(del_resp["ResponseMetadata"]["HTTPStatusCode"]):
                logger.info("DeleteObj operation successful")
                break
            else:
                logger.info("DeleteObj operation is denied access")

        # Verifying whether Put object action is denied
        logger.info(
            f"Verifying whether user: {user.email_id} is denied to Put object after updating policy"
        )
        try:
            s3_put_object(user, obc_obj.bucket_name, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Put object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
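
The version gating above calls version.get_semantic_ocs_version_from_config() in several places; evaluating it once and deriving the policy inputs from a single flag reads more clearly. A sketch reusing obc_obj, user, and gen_bucket_policy from the test above (legacy_cluster is a hypothetical local name):

# Evaluate the cluster version once and derive the policy inputs from it
ocs_version = version.get_semantic_ocs_version_from_config()
legacy_cluster = ocs_version <= version.VERSION_4_6  # hypothetical flag

bucket_policy_generated = gen_bucket_policy(
    user_list=[obc_obj.obc_account, user.email_id],
    actions_list=["PutObject"] if legacy_cluster else ["GetObject", "DeleteObject"],
    effect="Allow" if legacy_cluster else "Deny",
    resources_list=[f"{obc_obj.bucket_name}/*"],
)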
Code example #18
    def test_ns_bucket_unsigned_access(self, mcg_obj, bucket_factory,
                                       namespace_store_factory):
        """
        Test that anonymous (unsigned) S3 operations are denied on a namespace bucket.
        """
        sample_data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Create the namespace bucket
        nss_tup = ("oc", {"aws": [(1, self.DEFAULT_REGION)]})
        ns_store = namespace_store_factory(*nss_tup)[0]
        bucketclass_dict = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Single",
                "namespacestores": [ns_store],
            },
        }
        ns_bucket = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0].name

        # Put and Get object operations done with s3 credentials
        logger.info(f"Put and Get object operations on {ns_bucket}")
        assert bucket_utils.s3_put_object(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=object_key,
            data=sample_data,
        ), "Failed: PutObject"
        assert bucket_utils.s3_get_object(
            s3_obj=mcg_obj, bucketname=ns_bucket,
            object_key=object_key), "Failed: GetObject"

        # Boto3 client with signing disabled
        anon_s3_client = boto3.client(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=mcg_obj.s3_endpoint,
            config=Config(signature_version=UNSIGNED),
        )

        logger.info(
            f"Verifying anonymous access is blocked on namespace bucket: {ns_bucket}"
        )
        try:
            anon_s3_client.get_object(Bucket=ns_bucket, Key=object_key)
        except boto3exception.ClientError as e:
            response = HttpResponseParser(e.response)
            assert (response.error["Code"] == "AccessDenied"
                    ), f"Invalid error code:{response.error['Code']}"
            assert (response.status_code == 403
                    ), f"Invalid status code:{response.status_code}"
            assert (response.error["Message"] == "Access Denied"
                    ), f"Invalid error message:{response.error['Message']}"
        else:
            assert (
                False
            ), "GetObject operation has been granted access, when it should have been blocked"
Code example #19
    def test_bucket_policy_multi_statement(self, mcg_obj, bucket_factory):
        """
        Tests multiple statements in a bucket policy
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        user_name = "noobaa-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        # Creating OBC (account) and Noobaa user account
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)
        noobaa_user = NoobaaAccount(mcg_obj,
                                    name=user_name,
                                    email=email,
                                    buckets=[obc_obj.bucket_name])
        accounts = [obc_obj, noobaa_user]

        # Statement_1 public read access to a bucket
        single_statement_policy = gen_bucket_policy(
            sid="statement-1",
            user_list=["*"],
            actions_list=['GetObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
            effect="Allow")

        # Additional statements; Statement_2 - PutObject permission for a specific user
        # Statement_3 - Denying permission to DeleteObject action for multiple users
        new_statements = {
            "statement_2": {
                'Action': 's3:PutObject',
                'Effect': 'Allow',
                'Principal': noobaa_user.email_id,
                'Resource': [f'arn:aws:s3:::{obc_obj.bucket_name}/{"*"}'],
                'Sid': 'Statement-2'
            },
            "statement_3": {
                'Action': 's3:DeleteObject',
                'Effect': 'Deny',
                'Principal': [obc_obj.obc_account, noobaa_user.email_id],
                'Resource': [f'arn:aws:s3:::{"*"}'],
                'Sid': 'Statement-3'
            }
        }

        for value in new_statements.values():
            single_statement_policy["Statement"].append(value)

        logger.info(f"New policy {single_statement_policy}")
        bucket_policy = json.dumps(single_statement_policy)

        # Creating Policy
        logger.info(
            f'Creating multi statement bucket policy on bucket: {obc_obj.bucket_name}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy "

        # Getting Policy
        logger.info(
            f'Getting multi statement bucket policy from bucket: {obc_obj.bucket_name}'
        )
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # NooBaa user writes an object to bucket
        logger.info(
            f'Writing object on bucket: {obc_obj.bucket_name} with User: {noobaa_user.email_id}'
        )
        assert s3_put_object(noobaa_user, obc_obj.bucket_name, object_key,
                             data), "Failed: Put Object"

        # Verifying public read access
        logger.info(
            f'Reading object on bucket: {obc_obj.bucket_name} with User: {obc_obj.obc_account}'
        )
        assert s3_get_object(obc_obj, obc_obj.bucket_name,
                             object_key), "Failed: Get Object"

        # Verifying Delete object is denied on both Accounts
        for user in accounts:
            logger.info(
                f"Verifying whether S3:DeleteObject action is denied access for {user}"
            )
            try:
                s3_delete_object(user, obc_obj.bucket_name, object_key)
            except boto3exception.ClientError as e:
                logger.info(e.response)
                response = HttpResponseParser(e.response)
                if response.error['Code'] == 'AccessDenied':
                    logger.info(
                        f"DeleteObject failed due to: {response.error['Message']}"
                    )
                else:
                    raise UnexpectedBehaviour(
                        f"{e.response} received invalid error code {response.error['Code']}"
                    )
            else:
                assert False, "Delete object succeeded when it should have failed"
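
For reference, the three-statement document assembled above serializes to roughly the following JSON once the two extra statements are appended (bucket name and principals are placeholders, and the Version field is an assumption about what gen_bucket_policy emits):

import json

multi_statement_policy = {
    "Version": "2012-10-17",  # assumed; the generator may emit a different value
    "Statement": [
        {
            "Sid": "statement-1",
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": ["arn:aws:s3:::my-bucket/*"],
        },
        {
            "Sid": "Statement-2",
            "Effect": "Allow",
            "Principal": "noobaa-user@mail.com",
            "Action": "s3:PutObject",
            "Resource": ["arn:aws:s3:::my-bucket/*"],
        },
        {
            "Sid": "Statement-3",
            "Effect": "Deny",
            "Principal": ["obc-account", "noobaa-user@mail.com"],
            "Action": "s3:DeleteObject",
            "Resource": ["arn:aws:s3:::*"],
        },
    ],
}
print(json.dumps(multi_statement_policy, indent=2))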
Code example #20
    def test_bucket_policy_effect_deny(self, mcg_obj, bucket_factory):
        """
        Tests explicit "Deny" effect on bucket policy actions
        """
        data = "Sample string content to write to a new S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Creating an OBC user (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin writes an object to bucket
        logger.info(
            f'Writing an object on bucket: {obc_obj.bucket_name} by Admin')
        assert s3_put_object(mcg_obj, obc_obj.bucket_name, object_key,
                             data), "Failed: PutObject"

        # Admin sets policy with Effect: Deny on obc bucket with obc-account principal
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['GetObject'],
            resources_list=[f'{obc_obj.bucket_name}/{object_key}'],
            effect="Deny")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(
            f'Getting bucket policy from bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether Get action is denied access
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to GetObject'
        )
        try:
            s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('GetObject action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Get object succeeded when it should have failed"

        # Admin sets a new policy on same obc bucket with same account but with different action and resource
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['DeleteObject'],
            resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
            effect="Deny")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(f'Creating bucket policy on bucket: {obc_obj.bucket_name}')
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(
            f'Getting bucket policy from bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # Verifying whether delete action is denied
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to Delete object'
        )
        try:
            s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('Delete Object action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
        else:
            assert False, "Delete object succeeded when it should have failed"
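
The put_bucket_policy and get_bucket_policy helpers used throughout these tests wrap calls equivalent to the raw boto3 client operations below. A minimal sketch of the round trip, reusing the MCG S3 client from the fixtures (policy contents are placeholders):

import json

deny_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Deny",
            "Principal": "obc-account",  # placeholder principal
            "Action": "s3:GetObject",
            "Resource": ["arn:aws:s3:::my-bucket/my-object"],
        }
    ],
}

s3_client = mcg_obj.s3_client  # assumed fixture, as used elsewhere in this collection
s3_client.put_bucket_policy(Bucket="my-bucket", Policy=json.dumps(deny_policy))
print(s3_client.get_bucket_policy(Bucket="my-bucket")["Policy"])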
Code example #21
    def test_mcg_namespace_mpu_crd(self, mcg_obj, awscli_pod, bucket_factory,
                                   bucketclass_dict):
        """
        Test multipart upload S3 operations on namespace buckets (created by CRDs)
        Validates create, upload, upload copy and list parts operations

        """
        ns_buc = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]

        ns_bucket = ns_buc.name

        object_path = f"s3://{ns_bucket}"

        logger.info(
            f"Setting up test files for mpu and aborting any mpu on bucket: {ns_bucket}"
        )
        mpu_key, origin_dir, res_dir, parts = multipart_setup(awscli_pod)
        bucket_utils.abort_all_multipart_upload(mcg_obj, ns_bucket, COPY_OBJ)

        # Initiate mpu, Upload part copy, List and Abort operations
        logger.info(
            f"Put object on bucket: {ns_bucket} to create a copy source")
        assert bucket_utils.s3_put_object(s3_obj=mcg_obj,
                                          bucketname=ns_bucket,
                                          object_key=ROOT_OBJ,
                                          data=OBJ_DATA), "Failed: PutObject"
        logger.info(
            f"Initiating mpu on bucket: {ns_bucket} with key {COPY_OBJ}")
        part_copy_id = bucket_utils.create_multipart_upload(
            mcg_obj, ns_bucket, COPY_OBJ)
        list_mpu_res = bucket_utils.list_multipart_upload(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
        if (constants.AZURE_PLATFORM not in
                bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]):
            logger.info(f"Listing in-progress mpu: {list_mpu_res}")
            assert (part_copy_id == list_mpu_res["Uploads"][0]["UploadId"]
                    ), "Invalid UploadId"

        logger.info(f"Uploading a part copy to: {ns_bucket}")
        assert bucket_utils.s3_upload_part_copy(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            copy_source=f"/{ns_bucket}/{ROOT_OBJ}",
            object_key=COPY_OBJ,
            part_number=1,
            upload_id=part_copy_id,
        ), "Failed: upload part copy"

        logger.info(
            f"Aborting initiated multipart upload with id: {part_copy_id}")
        assert bucket_utils.abort_multipart(mcg_obj, ns_bucket, COPY_OBJ,
                                            part_copy_id), "Abort failed"

        # Initiate mpu, Upload part, List parts operations
        logger.info(
            f"Initiating Multipart Upload on Bucket: {ns_bucket} with Key: {mpu_key}"
        )
        mp_upload_id = bucket_utils.create_multipart_upload(
            mcg_obj, ns_bucket, mpu_key)

        list_mpu_res = bucket_utils.list_multipart_upload(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
        if (constants.AZURE_PLATFORM not in
                bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]):
            logger.info(f"Listing multipart upload: {list_mpu_res}")
            assert (mp_upload_id == list_mpu_res["Uploads"][0]["UploadId"]
                    ), "Invalid UploadId"

        logger.info(f"Uploading individual parts to the bucket: {ns_bucket}")
        uploaded_parts = bucket_utils.upload_parts(
            mcg_obj=mcg_obj,
            awscli_pod=awscli_pod,
            bucketname=ns_bucket,
            object_key=mpu_key,
            body_path=res_dir,
            upload_id=mp_upload_id,
            uploaded_parts=parts,
        )
        list_parts_res = bucket_utils.list_uploaded_parts(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=mpu_key,
            upload_id=mp_upload_id,
        )
        logger.info(f"Listing individual parts: {list_parts_res['Parts']}")
        for i, ele in enumerate(uploaded_parts):
            assert (ele["PartNumber"] == list_parts_res["Parts"][i]
                    ["PartNumber"]), "Invalid part_number"
            assert ele["ETag"] == list_parts_res["Parts"][i][
                "ETag"], "Invalid ETag"

        logger.info(f"Completing the Multipart Upload on bucket: {ns_bucket}")
        assert bucket_utils.complete_multipart_upload(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=mpu_key,
            upload_id=mp_upload_id,
            parts=uploaded_parts,
        ), "MPU did not complete"

        # Checksum validation after completing MPU
        logger.info(
            f"Downloading the completed multipart object from {ns_bucket} to aws-cli pod"
        )
        bucket_utils.sync_object_directory(podobj=awscli_pod,
                                           src=object_path,
                                           target=res_dir,
                                           s3_obj=mcg_obj)
        assert bucket_utils.verify_s3_object_integrity(
            original_object_path=f"{origin_dir}/{mpu_key}",
            result_object_path=f"{res_dir}/{mpu_key}",
            awscli_pod=awscli_pod,
        ), "Checksum comparision between original and result object failed"
Code example #22
    def test_mcg_namespace_basic_s3_ops(self, mcg_obj, ns_resource_factory,
                                        bucket_factory, platform):
        """
        Test basic S3 operations on namespace buckets.

        1. Validates put, get, copy, head, get_acl, delete object operations
        2. Validates listObjects v1 and v2 with prefix, delimiter combinations with page entries

        """
        max_keys = 50

        namespace_res = ns_resource_factory(platform=platform)

        ns_bucket = bucket_factory(
            amount=1,
            interface="mcg-namespace",
            write_ns_resource=namespace_res[1],
            read_ns_resources=[namespace_res[1]],
        )[0].name

        # Put, Get, Copy, Head, Get Acl and Delete object operations
        logger.info(f"Put and Get object operation on {ns_bucket}")
        assert bucket_utils.s3_put_object(s3_obj=mcg_obj,
                                          bucketname=ns_bucket,
                                          object_key=ROOT_OBJ,
                                          data=OBJ_DATA), "Failed: PutObject"
        get_res = bucket_utils.s3_get_object(s3_obj=mcg_obj,
                                             bucketname=ns_bucket,
                                             object_key=ROOT_OBJ)

        list_response = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                        bucketname=ns_bucket)
        get_list_and_verify(list_response, [ROOT_OBJ], "Contents")

        assert bucket_utils.s3_copy_object(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            source=f"/{ns_bucket}/{ROOT_OBJ}",
            object_key=COPY_OBJ,
        ), "Failed: CopyObject"
        get_copy_res = bucket_utils.s3_get_object(s3_obj=mcg_obj,
                                                  bucketname=ns_bucket,
                                                  object_key=COPY_OBJ)
        logger.info(f"Verifying Etag of {COPY_OBJ} from Get object operations")
        assert get_copy_res["ETag"] == get_res["ETag"], "ETag mismatch"

        head_res = bucket_utils.s3_head_object(s3_obj=mcg_obj,
                                               bucketname=ns_bucket,
                                               object_key=ROOT_OBJ)
        logger.info(
            f"Verifying metadata from head_object operation: {head_res['Metadata']}"
        )
        if platform == constants.AZURE_PLATFORM:
            assert (head_res["Metadata"]["noobaa-namespace-blob-container"] ==
                    namespace_res[0]), "Invalid object metadata"
        else:
            assert (head_res["Metadata"]["noobaa-namespace-s3-bucket"] ==
                    namespace_res[0]), "Invalid object metadata"

        get_acl_res = bucket_utils.s3_get_object_acl(s3_obj=mcg_obj,
                                                     bucketname=ns_bucket,
                                                     object_key=ROOT_OBJ)
        logger.info(
            f"Verifying Get object ACl response: {get_acl_res['Grants']}")
        assert (get_acl_res["Grants"][0]["Grantee"]["DisplayName"] == "NooBaa"
                ), "Invalid grantee"

        logger.info(
            f"Deleting {ROOT_OBJ} and {COPY_OBJ} and verifying response")
        del_res = bucket_utils.s3_delete_objects(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_keys=[{
                "Key": f"{ROOT_OBJ}"
            }, {
                "Key": f"{COPY_OBJ}"
            }],
        )
        for i, key in enumerate([ROOT_OBJ, COPY_OBJ]):
            assert (key == del_res["Deleted"][i]["Key"]
                    ), "Object key not found/not-deleted"

        if platform != constants.AZURE_PLATFORM:
            logger.info("Setting up objects to verify list operations")
            obj_keys, obj_prefixes, mid_index = setup_objects_to_list(
                amount=100,
                prefix="Drive/Folder",
                bucket_name=ns_bucket,
                mcg_obj=mcg_obj,
            )

            # List v1 and page entries
            logger.info(f"ListObjectsV1 operation on {ns_bucket}")
            list_v1_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
            get_list_and_verify(list_v1_res,
                                obj_keys,
                                "Contents",
                                version="v1")
            logger.info(
                "Get and verify next page entries of list using ListObjectV1")
            first_page_res = bucket_utils.s3_list_objects_v1(
                s3_obj=mcg_obj, bucketname=ns_bucket, max_keys=max_keys)
            last_key = get_list_and_verify(first_page_res,
                                           obj_keys[:mid_index],
                                           "Contents",
                                           version="v1")
            next_page_res = bucket_utils.s3_list_objects_v1(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                max_keys=max_keys,
                marker=last_key)
            get_list_and_verify(next_page_res,
                                obj_keys[mid_index:],
                                "Contents",
                                version="v1")

            # List v1 with prefix and page entries
            logger.info(f"ListObjectsV1 operation on {ns_bucket} with prefix")
            list_v1_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket,
                                                          prefix="Drive/")
            get_list_and_verify(list_v1_res,
                                obj_keys,
                                "Contents",
                                "Drive/",
                                version="v1")
            logger.info(
                "Get and verify next page entries of list using ListObjectV1 with prefix"
            )
            first_page_res = bucket_utils.s3_list_objects_v1(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                max_keys=max_keys)
            last_key = get_list_and_verify(first_page_res,
                                           obj_keys[:mid_index],
                                           "Contents",
                                           "Drive/",
                                           version="v1")
            next_page_res = bucket_utils.s3_list_objects_v1(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                max_keys=max_keys,
                marker=last_key,
            )
            get_list_and_verify(next_page_res,
                                obj_keys[mid_index:],
                                "Contents",
                                "Drive/",
                                version="v1")

            # List v1 with prefix, delimiter and page entries
            logger.info(
                f"ListObjectsV1 operation on {ns_bucket} with prefix and delimiter"
            )
            list_v1_res = bucket_utils.s3_list_objects_v1(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket,
                                                          prefix="Drive/",
                                                          delimiter="/")
            get_list_and_verify(list_v1_res,
                                obj_prefixes,
                                "CommonPrefixes",
                                "Drive/",
                                "/",
                                version="v1")
            logger.info(
                "Get and verify next page entries of list using ListObjectV1 with prefix and delimiter"
            )
            first_page_res = bucket_utils.s3_list_objects_v1(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                delimiter="/",
                max_keys=max_keys,
            )
            get_list_and_verify(
                first_page_res,
                obj_prefixes[:mid_index],
                "CommonPrefixes",
                "Drive/",
                "/",
                version="v1",
            )
            next_page_res = bucket_utils.s3_list_objects_v1(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                delimiter="/",
                max_keys=max_keys,
                marker=first_page_res["NextMarker"],
            )
            get_list_and_verify(
                next_page_res,
                obj_prefixes[mid_index:],
                "CommonPrefixes",
                "Drive/",
                "/",
                version="v1",
            )

            # List v2
            logger.info(f"ListObjectsV2 operation on {ns_bucket}")
            list_v2_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket)
            get_list_and_verify(list_v2_res,
                                obj_keys,
                                "Contents",
                                version="v2")
            logger.info(
                "Get and verify next page entries of list using ListObjectV2")
            first_page_res = bucket_utils.s3_list_objects_v2(
                s3_obj=mcg_obj, bucketname=ns_bucket, max_keys=max_keys)
            get_list_and_verify(first_page_res,
                                obj_keys[:mid_index],
                                "Contents",
                                version="v2")
            next_page_res = bucket_utils.s3_list_objects_v2(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                max_keys=max_keys,
                con_token=first_page_res["NextContinuationToken"],
            )
            get_list_and_verify(next_page_res,
                                obj_keys[mid_index:],
                                "Contents",
                                version="v2")

            # List v2 with prefix
            logger.info(f"ListObjectsV2 operation on {ns_bucket} with prefix")
            list_v2_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket,
                                                          prefix="Drive/")
            get_list_and_verify(list_v2_res,
                                obj_keys,
                                "Contents",
                                "Drive/",
                                version="v2")
            logger.info(
                "Get and verify next page entries of list using ListObjectV2 with prefix"
            )
            first_page_res = bucket_utils.s3_list_objects_v2(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                max_keys=max_keys)
            get_list_and_verify(first_page_res,
                                obj_keys[:mid_index],
                                "Contents",
                                "Drive/",
                                version="v2")
            next_page_res = bucket_utils.s3_list_objects_v2(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                max_keys=max_keys,
                con_token=first_page_res["NextContinuationToken"],
            )
            get_list_and_verify(next_page_res,
                                obj_keys[mid_index:],
                                "Contents",
                                "Drive/",
                                version="v2")

            # List v2 with prefix and delimiter
            logger.info(
                f"ListObjectsV2 operation on {ns_bucket} with prefix and delimiter"
            )
            list_v2_res = bucket_utils.s3_list_objects_v2(s3_obj=mcg_obj,
                                                          bucketname=ns_bucket,
                                                          prefix="Drive/",
                                                          delimiter="/")
            get_list_and_verify(list_v2_res,
                                obj_prefixes,
                                "CommonPrefixes",
                                "Drive/",
                                "/",
                                version="v2")
            logger.info(
                "Get and verify next page entries of list using ListObjectV2 with prefix and delimiter"
            )
            first_page_res = bucket_utils.s3_list_objects_v2(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                delimiter="/",
                max_keys=max_keys,
            )
            get_list_and_verify(
                first_page_res,
                obj_prefixes[:mid_index],
                "CommonPrefixes",
                "Drive/",
                "/",
                version="v2",
            )
            next_page_res = bucket_utils.s3_list_objects_v2(
                s3_obj=mcg_obj,
                bucketname=ns_bucket,
                prefix="Drive/",
                delimiter="/",
                max_keys=max_keys,
                con_token=first_page_res["NextContinuationToken"],
            )
            get_list_and_verify(
                next_page_res,
                obj_prefixes[mid_index:],
                "CommonPrefixes",
                "Drive/",
                "/",
                version="v2",
            )
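
The paging logic exercised above maps onto the raw ListObjectsV2 continuation-token loop below; a sketch reusing mcg_obj and ns_bucket from the test scope:

# Drain a bucket listing page by page with raw boto3 ListObjectsV2
s3 = mcg_obj.s3_client  # assumed fixture
all_keys, token = [], None

while True:
    kwargs = {"Bucket": ns_bucket, "Prefix": "Drive/", "MaxKeys": 50}
    if token:
        kwargs["ContinuationToken"] = token
    page = s3.list_objects_v2(**kwargs)
    all_keys.extend(obj["Key"] for obj in page.get("Contents", []))
    if not page.get("IsTruncated"):
        break
    token = page["NextContinuationToken"]

print(f"Listed {len(all_keys)} keys under Drive/")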
Code example #23
    def test_mcg_namespace_disruptions_crd(
        self,
        mcg_obj,
        cld_mgr,
        awscli_pod,
        bucketclass_dict,
        bucket_factory,
        node_drain_teardown,
    ):
        """
        Test MCG namespace disruption flow

        1. Create NS resources with CRDs
        2. Create NS bucket with CRDs
        3. Upload to NS bucket
        4. Delete noobaa related pods and verify integrity of objects
        5. Create public access policy on NS bucket and verify Get op
        6. Drain nodes containing noobaa pods and verify integrity of objects
        7. Perform put operation to validate public access denial
        8. Edit/verify and remove objects on NS bucket

        """
        data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        awscli_node_name = awscli_pod.get()["spec"]["nodeName"]

        aws_s3_creds = {
            "access_key_id": cld_mgr.aws_client.access_key,
            "access_key": cld_mgr.aws_client.secret_key,
            "endpoint": constants.MCG_NS_AWS_ENDPOINT,
            "region": config.ENV_DATA["region"],
        }

        # S3 account details
        user_name = "nb-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        logger.info("Setting up test files for upload, to the bucket/resources")
        setup_base_objects(awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3)

        # Create the namespace resource and verify health
        ns_buc = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]
        ns_bucket = ns_buc.name

        aws_target_bucket = ns_buc.bucketclass.namespacestores[0].uls_name

        logger.info(f"Namespace bucket: {ns_bucket} created")

        logger.info(f"Uploading objects to ns bucket: {ns_bucket}")
        sync_object_directory(
            awscli_pod,
            src=MCG_NS_ORIGINAL_DIR,
            target=f"s3://{ns_bucket}",
            s3_obj=mcg_obj,
        )

        for pod_to_respin in self.labels_map:
            logger.info(f"Re-spinning mcg resource: {self.labels_map[pod_to_respin]}")
            pod_obj = pod.Pod(
                **pod.get_pods_having_label(
                    label=self.labels_map[pod_to_respin],
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                )[0]
            )

            pod_obj.delete(force=True)

            assert pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=self.labels_map[pod_to_respin],
                resource_count=1,
                timeout=300,
            )

            logger.info(
                f"Downloading objects from ns bucket: {ns_bucket} "
                f"after re-spinning: {self.labels_map[pod_to_respin]}"
            )
            sync_object_directory(
                awscli_pod,
                src=f"s3://{ns_bucket}",
                target=MCG_NS_RESULT_DIR,
                s3_obj=mcg_obj,
            )

            logger.info(
                f"Verifying integrity of objects "
                f"after re-spinning: {self.labels_map[pod_to_respin]}"
            )
            compare_directory(
                awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3
            )

        # S3 account
        user = NoobaaAccount(mcg_obj, name=user_name, email=email, buckets=[ns_bucket])
        logger.info(f"Noobaa account: {user.email_id} with S3 access created")

        # Admin sets Public access policy(*)
        bucket_policy_generated = gen_bucket_policy(
            user_list=["*"],
            actions_list=["GetObject"],
            resources_list=[f'{ns_bucket}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {ns_bucket} with wildcard (*) Principal"
        )
        put_policy = put_bucket_policy(mcg_obj, ns_bucket, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        logger.info(f"Getting bucket policy on bucket: {ns_bucket}")
        get_policy = get_bucket_policy(mcg_obj, ns_bucket)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # MCG admin writes an object to bucket
        logger.info(f"Writing object on bucket: {ns_bucket} by admin")
        assert s3_put_object(mcg_obj, ns_bucket, object_key, data), "Failed: PutObject"

        # Verifying whether Get operation is allowed to any S3 user
        logger.info(
            f"Get object action on namespace bucket: {ns_bucket} "
            f"with user: {user.email_id}"
        )
        assert s3_get_object(user, ns_bucket, object_key), "Failed: GetObject"

        # Upload files to NS target
        logger.info(
            f"Uploading objects directly to ns resource target: {aws_target_bucket}"
        )
        sync_object_directory(
            awscli_pod,
            src=MCG_NS_ORIGINAL_DIR,
            target=f"s3://{aws_target_bucket}",
            signed_request_creds=aws_s3_creds,
        )

        for pod_to_drain in self.labels_map:
            pod_obj = pod.Pod(
                **pod.get_pods_having_label(
                    label=self.labels_map[pod_to_drain],
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                )[0]
            )

            # Retrieve the node name on which the pod resides
            node_name = pod_obj.get()["spec"]["nodeName"]

            if awscli_node_name == node_name:
                logger.info(
                    f"Skipping node drain since aws cli pod node: "
                    f"{awscli_node_name} is same as {pod_to_drain} "
                    f"pod node: {node_name}"
                )
                continue

            # Drain the node
            drain_nodes([node_name])
            wait_for_nodes_status(
                [node_name], status=constants.NODE_READY_SCHEDULING_DISABLED
            )
            schedule_nodes([node_name])
            wait_for_nodes_status(timeout=300)

            # Retrieve the new pod
            pod_obj = pod.Pod(
                **pod.get_pods_having_label(
                    label=self.labels_map[pod_to_drain],
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                )[0]
            )
            wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout=120)

            # Verify all storage pods are running
            wait_for_storage_pods()

            logger.info(
                f"Downloading objects from ns bucket: {ns_bucket} "
                f"after draining node: {node_name} with pod {pod_to_drain}"
            )
            sync_object_directory(
                awscli_pod,
                src=f"s3://{ns_bucket}",
                target=MCG_NS_RESULT_DIR,
                s3_obj=mcg_obj,
            )

            logger.info(
                f"Verifying integrity of objects "
                f"after draining node with pod: {pod_to_drain}"
            )
            compare_directory(
                awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3
            )

        logger.info(f"Editing the namespace resource bucket: {ns_bucket}")
        namespace_bucket_update(
            mcg_obj,
            bucket_name=ns_bucket,
            read_resource=[aws_target_bucket],
            write_resource=aws_target_bucket,
        )

        logger.info(f"Verifying object download after edit on ns bucket: {ns_bucket}")
        sync_object_directory(
            awscli_pod,
            src=f"s3://{ns_bucket}",
            target=MCG_NS_RESULT_DIR,
            s3_obj=mcg_obj,
        )

        # Verifying whether Put object action is denied
        logger.info(
            f"Verifying whether user: {user.email_id} has only public read access"
        )
        try:
            s3_put_object(user, ns_bucket, object_key, data)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "AccessDenied":
                logger.info("Put object action has been denied access")
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )

        logger.info(f"Removing objects from ns bucket: {ns_bucket}")
        rm_object_recursive(awscli_pod, target=ns_bucket, mcg_obj=mcg_obj)
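
The drain_nodes/schedule_nodes helpers used above correspond roughly to the oc adm commands below; a sketch under that assumption (the node name is a placeholder, and the drain flags vary between oc releases):

import subprocess

node = "worker-0"  # placeholder node name
# Cordon the node and evict its pods
subprocess.run(
    ["oc", "adm", "drain", node, "--ignore-daemonsets", "--force"],
    check=True,
)
# ... wait for workloads to reschedule, then make the node schedulable again
subprocess.run(["oc", "adm", "uncordon", node], check=True)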
Code example #24
    def test_bucket_website_and_policies(self, mcg_obj, bucket_factory):
        """
        Tests bucket website bucket policy actions
        """
        # Creating an OBC (account)
        obc = bucket_factory(amount=1, interface='OC')
        obc_obj = OBC(obc[0].name)

        # Admin sets policy with Put/Get bucket website actions
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=bucket_website_action_list,
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
            effect="Allow")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy for bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(f"Adding bucket website config to: {obc_obj.bucket_name}")
        assert s3_put_bucket_website(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name,
            website_config=website_config), "Failed: PutBucketWebsite"
        logger.info(
            f"Getting bucket website config from: {obc_obj.bucket_name}")
        assert s3_get_bucket_website(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name), "Failed: GetBucketWebsite"

        logger.info("Writing index and error data to the bucket")
        assert s3_put_object(s3_obj=obc_obj,
                             bucketname=obc_obj.bucket_name,
                             object_key="index.html",
                             data=index,
                             content_type='text/html'), "Failed: PutObject"
        assert s3_put_object(s3_obj=obc_obj,
                             bucketname=obc_obj.bucket_name,
                             object_key="error.html",
                             data=error,
                             content_type='text/html'), "Failed: PutObject"

        # Verifying whether DeleteBucketWebsite action is denied access
        logger.info(
            f'Verifying whether user: {obc_obj.obc_account} is denied to DeleteBucketWebsite'
        )
        try:
            s3_delete_bucket_website(s3_obj=obc_obj,
                                     bucketname=obc_obj.bucket_name)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info('DeleteBucketWebsite action has been denied access')
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )

        # Admin modifies policy to allow DeleteBucketWebsite action
        bucket_policy_generated = gen_bucket_policy(
            user_list=obc_obj.obc_account,
            actions_list=['DeleteBucketWebsite'],
            resources_list=[
                obc_obj.bucket_name, f'{obc_obj.bucket_name}/{"*"}'
            ],
            effect="Allow")
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
        )
        assert put_bucket_policy(mcg_obj, obc_obj.bucket_name,
                                 bucket_policy), "Failed: PutBucketPolicy"

        # Getting Policy
        logger.info(f'Getting bucket policy for bucket: {obc_obj.bucket_name}')
        get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        logger.info(
            f"Deleting bucket website config from bucket: {obc_obj.bucket_name}"
        )
        assert s3_delete_bucket_website(
            s3_obj=obc_obj,
            bucketname=obc_obj.bucket_name), "Failed: DeleteBucketWebsite"
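
The website_config object used above is defined outside this excerpt; with a raw boto3 client the equivalent Put/Get/Delete bucket-website calls look roughly like this, with index and error documents matching the objects the test writes (bucket name is a placeholder):

website_config = {
    "IndexDocument": {"Suffix": "index.html"},
    "ErrorDocument": {"Key": "error.html"},
}

s3 = mcg_obj.s3_client  # assumed fixture; the test routes these calls through obc_obj
s3.put_bucket_website(Bucket="my-bucket", WebsiteConfiguration=website_config)
print(s3.get_bucket_website(Bucket="my-bucket"))
s3.delete_bucket_website(Bucket="my-bucket")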