def test_anonymous_read_only(self, mcg_obj, bucket_factory):
    """
    Tests read only access by an anonymous user
    """
    data = "Sample string content to write to a new S3 object"
    object_key = "ObjKey-" + str(uuid.uuid4().hex)
    user_name = "noobaa-user" + str(uuid.uuid4().hex)
    email = user_name + "@mail.com"

    # Creating an S3 bucket
    s3_bucket = bucket_factory(amount=1, interface='S3')[0]

    # Creating a random user account
    user = NoobaaAccount(
        mcg_obj, name=user_name, email=email, buckets=[s3_bucket.name]
    )

    # Admin sets policy for all users '*' (public access)
    bucket_policy_generated = gen_bucket_policy(
        user_list=["*"],
        actions_list=['GetObject'],
        resources_list=[f'{s3_bucket.name}/{"*"}']
    )
    bucket_policy = json.dumps(bucket_policy_generated)

    logger.info(
        f'Creating bucket policy on bucket: {s3_bucket.name} with wildcard (*) Principal'
    )
    put_policy = helpers.put_bucket_policy(mcg_obj, s3_bucket.name, bucket_policy)
    logger.info(f'Put bucket policy response from Admin: {put_policy}')

    # Getting Policy
    logger.info(f'Getting bucket policy on bucket: {s3_bucket.name}')
    get_policy = helpers.get_bucket_policy(mcg_obj, s3_bucket.name)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # Admin writes an object to bucket
    logger.info(f'Writing object on bucket: {s3_bucket.name} by admin')
    assert helpers.s3_put_object(
        mcg_obj, s3_bucket.name, object_key, data
    ), "Failed: PutObject"

    # Reading the object by anonymous user
    logger.info(
        f'Getting object by user: {user.email_id} on bucket: {s3_bucket.name}'
    )
    assert helpers.s3_get_object(
        user, s3_bucket.name, object_key
    ), f"Failed: Get Object by user {user.email_id}"
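# For reference, a minimal sketch of the policy document the gen_bucket_policy call
# above is expected to produce: public read-only access (wildcard principal, GetObject
# only) on every object in the bucket. The exact keys emitted by gen_bucket_policy may
# differ, and "example-bucket" is only a placeholder, not a bucket created by the test.
EXAMPLE_PUBLIC_READ_POLICY = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": ["s3:GetObject"],
            "Effect": "Allow",
            "Principal": ["*"],
            "Resource": ["arn:aws:s3:::example-bucket/*"],
        }
    ],
}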
def test_mcg_namespace_lifecycle_crd(
    self,
    mcg_obj,
    cld_mgr,
    awscli_pod,
    bucket_factory,
    test_directory_setup,
    bucketclass_dict,
):
    """
    Test MCG namespace resource/bucket lifecycle using CRDs

    1. Create namespace resources with CRDs
    2. Create namespace bucket with CRDs
    3. Set bucket policy on namespace bucket with a S3 user principal
    4. Verify bucket policy
    5. Read/write directly on namespace resource target
    6. Edit the namespace bucket
    7. Delete namespace resource and bucket

    """
    data = "Sample string content to write to a S3 object"
    object_key = "ObjKey-" + str(uuid.uuid4().hex)

    if (
        constants.RGW_PLATFORM
        in bucketclass_dict["namespace_policy_dict"]["namespacestore_dict"]
    ):
        s3_creds = {
            "access_key_id": cld_mgr.rgw_client.access_key,
            "access_key": cld_mgr.rgw_client.secret_key,
            "endpoint": cld_mgr.rgw_client.endpoint,
        }
    else:
        s3_creds = {
            "access_key_id": cld_mgr.aws_client.access_key,
            "access_key": cld_mgr.aws_client.secret_key,
            "endpoint": constants.MCG_NS_AWS_ENDPOINT,
            "region": config.ENV_DATA["region"],
        }

    # Noobaa s3 account details
    user_name = "noobaa-user" + str(uuid.uuid4().hex)
    email = user_name + "@mail.com"

    # Create the namespace resource and bucket
    ns_bucket = bucket_factory(
        amount=1,
        interface=bucketclass_dict["interface"],
        bucketclass=bucketclass_dict,
    )[0]
    aws_target_bucket = ns_bucket.bucketclass.namespacestores[0].uls_name
    logger.info(f"Namespace bucket: {ns_bucket.name} created")

    # Noobaa S3 account
    user = NoobaaAccount(
        mcg_obj, name=user_name, email=email, buckets=[ns_bucket.name]
    )
    logger.info(f"Noobaa account: {user.email_id} with S3 access created")

    # Deny DeleteObject for the S3 user on the namespace bucket
    bucket_policy_generated = gen_bucket_policy(
        user_list=[user.email_id],
        actions_list=["DeleteObject"],
        effect="Deny",
        resources_list=[f'{ns_bucket.name}/{"*"}'],
    )
    bucket_policy = json.dumps(bucket_policy_generated)

    logger.info(
        f"Creating bucket policy on bucket: {ns_bucket.name} with principal: {user.email_id}"
    )
    put_policy = put_bucket_policy(mcg_obj, ns_bucket.name, bucket_policy)
    logger.info(f"Put bucket policy response from Admin: {put_policy}")

    # Getting Policy
    logger.info(f"Getting bucket policy on bucket: {ns_bucket.name}")
    get_policy = get_bucket_policy(mcg_obj, ns_bucket.name)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # MCG admin writes an object to bucket
    logger.info(f"Writing object on bucket: {ns_bucket.name} by admin")
    assert s3_put_object(
        mcg_obj, ns_bucket.name, object_key, data
    ), "Failed: PutObject"

    # Verifying whether Get & Put object is allowed for the S3 user
    logger.info(
        f"Get object action on namespace bucket: {ns_bucket.name}"
        f" with user: {user.email_id}"
    )
    assert s3_get_object(user, ns_bucket.name, object_key), "Failed: GetObject"
    logger.info(
        f"Put object action on namespace bucket: {ns_bucket.name}"
        f" with user: {user.email_id}"
    )
    assert s3_put_object(user, ns_bucket.name, object_key, data), "Failed: PutObject"

    # Verifying whether Delete object action is denied
    logger.info(
        f"Verifying whether user: {user.email_id} "
        f"is denied to Delete object after updating policy"
    )
    try:
        s3_delete_object(user, ns_bucket.name, object_key)
    except boto3exception.ClientError as e:
        logger.info(e.response)
        response = HttpResponseParser(e.response)
        if response.error["Code"] == "AccessDenied":
            logger.info("Delete object action has been denied access")
        else:
            raise UnexpectedBehaviour(
                f"{e.response} received invalid error code "
                f"{response.error['Code']}"
            )
    else:
        assert False, (
            "Delete object operation was granted access, when it should have been denied"
        )

    logger.info("Setting up test files for upload, to the bucket/resources")
    setup_base_objects(awscli_pod, test_directory_setup.origin_dir, amount=3)

    # Upload files directly to NS resources
    logger.info(
        f"Uploading objects directly to ns resource target: {aws_target_bucket}"
    )
    sync_object_directory(
        awscli_pod,
        src=test_directory_setup.origin_dir,
        target=f"s3://{aws_target_bucket}",
        signed_request_creds=s3_creds,
    )

    # Read files directly from NS resources
    logger.info(
        f"Downloading objects directly from ns resource target: {aws_target_bucket}"
    )
    sync_object_directory(
        awscli_pod,
        src=f"s3://{aws_target_bucket}",
        target=test_directory_setup.result_dir,
        signed_request_creds=s3_creds,
    )

    # Edit namespace bucket
    logger.info(f"Editing the namespace resource bucket: {ns_bucket.name}")
    namespace_bucket_update(
        mcg_obj,
        bucket_name=ns_bucket.name,
        read_resource=[aws_target_bucket],
        write_resource=aws_target_bucket,
    )

    # Verify Download after editing bucket
    logger.info(
        f"Downloading objects directly from ns bucket target: {ns_bucket.name}"
    )
    sync_object_directory(
        awscli_pod,
        src=f"s3://{ns_bucket.name}",
        target=test_directory_setup.result_dir,
        s3_obj=mcg_obj,
    )

    # MCG namespace bucket delete
    logger.info(
        f"Deleting all objects on namespace resource bucket: {ns_bucket.name}"
    )
    rm_object_recursive(awscli_pod, ns_bucket.name, mcg_obj)

    # Namespace resource delete
    logger.info(f"Deleting the resource: {aws_target_bucket}")
    mcg_obj.delete_ns_resource(ns_resource_name=aws_target_bucket)
def test_mcg_namespace_disruptions_crd(
    self,
    mcg_obj,
    cld_mgr,
    awscli_pod,
    bucketclass_dict,
    bucket_factory,
    node_drain_teardown,
):
    """
    Test MCG namespace disruption flow

    1. Create NS resources with CRDs
    2. Create NS bucket with CRDs
    3. Upload to NS bucket
    4. Delete noobaa related pods and verify integrity of objects
    5. Create public access policy on NS bucket and verify Get op
    6. Drain nodes containing noobaa pods and verify integrity of objects
    7. Perform put operation to validate public access denial
    8. Edit/verify and remove objects on NS bucket

    """
    data = "Sample string content to write to a S3 object"
    object_key = "ObjKey-" + str(uuid.uuid4().hex)
    awscli_node_name = awscli_pod.get()["spec"]["nodeName"]

    aws_s3_creds = {
        "access_key_id": cld_mgr.aws_client.access_key,
        "access_key": cld_mgr.aws_client.secret_key,
        "endpoint": constants.MCG_NS_AWS_ENDPOINT,
        "region": config.ENV_DATA["region"],
    }

    # S3 account details
    user_name = "nb-user" + str(uuid.uuid4().hex)
    email = user_name + "@mail.com"

    logger.info("Setting up test files for upload, to the bucket/resources")
    setup_base_objects(awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3)

    # Create the namespace resource and verify health
    ns_buc = bucket_factory(
        amount=1,
        interface=bucketclass_dict["interface"],
        bucketclass=bucketclass_dict,
    )[0]
    ns_bucket = ns_buc.name
    aws_target_bucket = ns_buc.bucketclass.namespacestores[0].uls_name
    logger.info(f"Namespace bucket: {ns_bucket} created")

    logger.info(f"Uploading objects to ns bucket: {ns_bucket}")
    sync_object_directory(
        awscli_pod,
        src=MCG_NS_ORIGINAL_DIR,
        target=f"s3://{ns_bucket}",
        s3_obj=mcg_obj,
    )

    for pod_to_respin in self.labels_map:
        logger.info(f"Re-spinning mcg resource: {self.labels_map[pod_to_respin]}")
        pod_obj = pod.Pod(
            **pod.get_pods_having_label(
                label=self.labels_map[pod_to_respin],
                namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            )[0]
        )

        pod_obj.delete(force=True)

        assert pod_obj.ocp.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            selector=self.labels_map[pod_to_respin],
            resource_count=1,
            timeout=300,
        )

        logger.info(
            f"Downloading objects from ns bucket: {ns_bucket} "
            f"after re-spinning: {self.labels_map[pod_to_respin]}"
        )
        sync_object_directory(
            awscli_pod,
            src=f"s3://{ns_bucket}",
            target=MCG_NS_RESULT_DIR,
            s3_obj=mcg_obj,
        )

        logger.info(
            f"Verifying integrity of objects "
            f"after re-spinning: {self.labels_map[pod_to_respin]}"
        )
        compare_directory(
            awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3
        )

    # S3 account
    user = NoobaaAccount(mcg_obj, name=user_name, email=email, buckets=[ns_bucket])
    logger.info(f"Noobaa account: {user.email_id} with S3 access created")

    # Admin sets public access policy (*)
    bucket_policy_generated = gen_bucket_policy(
        user_list=["*"],
        actions_list=["GetObject"],
        resources_list=[f'{ns_bucket}/{"*"}'],
    )
    bucket_policy = json.dumps(bucket_policy_generated)

    logger.info(
        f"Creating bucket policy on bucket: {ns_bucket} with wildcard (*) Principal"
    )
    put_policy = put_bucket_policy(mcg_obj, ns_bucket, bucket_policy)
    logger.info(f"Put bucket policy response from Admin: {put_policy}")

    logger.info(f"Getting bucket policy on bucket: {ns_bucket}")
    get_policy = get_bucket_policy(mcg_obj, ns_bucket)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # MCG admin writes an object to bucket
    logger.info(f"Writing object on bucket: {ns_bucket} by admin")
    assert s3_put_object(mcg_obj, ns_bucket, object_key, data), "Failed: PutObject"

    # Verifying whether Get operation is allowed to any S3 user
    logger.info(
        f"Get object action on namespace bucket: {ns_bucket} "
        f"with user: {user.email_id}"
    )
    assert s3_get_object(user, ns_bucket, object_key), "Failed: GetObject"

    # Upload files to NS target
    logger.info(
        f"Uploading objects directly to ns resource target: {aws_target_bucket}"
    )
    sync_object_directory(
        awscli_pod,
        src=MCG_NS_ORIGINAL_DIR,
        target=f"s3://{aws_target_bucket}",
        signed_request_creds=aws_s3_creds,
    )

    for pod_to_drain in self.labels_map:
        pod_obj = pod.Pod(
            **pod.get_pods_having_label(
                label=self.labels_map[pod_to_drain],
                namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            )[0]
        )

        # Retrieve the node name on which the pod resides
        node_name = pod_obj.get()["spec"]["nodeName"]

        if awscli_node_name == node_name:
            logger.info(
                f"Skipping node drain since aws cli pod node: "
                f"{awscli_node_name} is same as {pod_to_drain} "
                f"pod node: {node_name}"
            )
            continue

        # Drain the node
        drain_nodes([node_name])
        wait_for_nodes_status(
            [node_name], status=constants.NODE_READY_SCHEDULING_DISABLED
        )
        schedule_nodes([node_name])
        wait_for_nodes_status(timeout=300)

        # Retrieve the new pod
        pod_obj = pod.Pod(
            **pod.get_pods_having_label(
                label=self.labels_map[pod_to_drain],
                namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            )[0]
        )
        wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout=120)

        # Verify all storage pods are running
        wait_for_storage_pods()

        logger.info(
            f"Downloading objects from ns bucket: {ns_bucket} "
            f"after draining node: {node_name} with pod {pod_to_drain}"
        )
        sync_object_directory(
            awscli_pod,
            src=f"s3://{ns_bucket}",
            target=MCG_NS_RESULT_DIR,
            s3_obj=mcg_obj,
        )

        logger.info(
            f"Verifying integrity of objects "
            f"after draining node with pod: {pod_to_drain}"
        )
        compare_directory(
            awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3
        )

    logger.info(f"Editing the namespace resource bucket: {ns_bucket}")
    namespace_bucket_update(
        mcg_obj,
        bucket_name=ns_bucket,
        read_resource=[aws_target_bucket],
        write_resource=aws_target_bucket,
    )

    logger.info(f"Verifying object download after edit on ns bucket: {ns_bucket}")
    sync_object_directory(
        awscli_pod,
        src=f"s3://{ns_bucket}",
        target=MCG_NS_RESULT_DIR,
        s3_obj=mcg_obj,
    )

    # Verifying whether Put object action is denied
    logger.info(
        f"Verifying whether user: {user.email_id} has only public read access"
    )

    logger.info(f"Removing objects from ns bucket: {ns_bucket}")
    rm_object_recursive(awscli_pod, target=ns_bucket, mcg_obj=mcg_obj)
def test_bucket_policy_multi_statement(self, mcg_obj, bucket_factory):
    """
    Tests multiple statements in a bucket policy
    """
    data = "Sample string content to write to a new S3 object"
    object_key = "ObjKey-" + str(uuid.uuid4().hex)
    user_name = "noobaa-user" + str(uuid.uuid4().hex)
    email = user_name + "@mail.com"

    # Creating OBC (account) and Noobaa user account
    obc = bucket_factory(amount=1, interface='OC')
    obc_obj = OBC(mcg_obj, obc=obc[0].name)
    noobaa_user = NoobaaAccount(
        mcg_obj, name=user_name, email=email, buckets=[obc_obj.bucket_name]
    )
    accounts = [obc_obj, noobaa_user]

    # Statement_1: public read access to the bucket
    single_statement_policy = gen_bucket_policy(
        sid="statement-1",
        user_list=["*"],
        actions_list=['GetObject'],
        resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
        effect="Allow"
    )

    # Additional statements:
    # Statement_2 - PutObject permission for a specific user
    # Statement_3 - Deny DeleteObject action for multiple users
    new_statements = {
        "statement_2": {
            'Action': 's3:PutObject',
            'Effect': 'Allow',
            'Principal': noobaa_user.email_id,
            'Resource': [f'arn:aws:s3:::{obc_obj.bucket_name}/{"*"}'],
            'Sid': 'Statement-2'
        },
        "statement_3": {
            'Action': 's3:DeleteObject',
            'Effect': 'Deny',
            'Principal': [obc_obj.obc_account, noobaa_user.email_id],
            'Resource': [f'arn:aws:s3:::{"*"}'],
            'Sid': 'Statement-3'
        }
    }

    for key, value in new_statements.items():
        single_statement_policy["Statement"].append(value)

    logger.info(f"New policy {single_statement_policy}")
    bucket_policy = json.dumps(single_statement_policy)

    # Creating Policy
    logger.info(
        f'Creating multi statement bucket policy on bucket: {obc_obj.bucket_name}'
    )
    assert helpers.put_bucket_policy(
        mcg_obj, obc_obj.bucket_name, bucket_policy
    ), "Failed: PutBucketPolicy"

    # Getting Policy
    logger.info(
        f'Getting multi statement bucket policy from bucket: {obc_obj.bucket_name}'
    )
    get_policy = helpers.get_bucket_policy(mcg_obj, obc_obj.bucket_name)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # NooBaa user writes an object to bucket
    logger.info(
        f'Writing object on bucket: {obc_obj.bucket_name} with User: {noobaa_user.email_id}'
    )
    assert helpers.s3_put_object(
        noobaa_user, obc_obj.bucket_name, object_key, data
    ), "Failed: Put Object"

    # Verifying public read access
    logger.info(
        f'Reading object on bucket: {obc_obj.bucket_name} with User: {obc_obj.obc_account}'
    )
    assert helpers.s3_get_object(
        obc_obj, obc_obj.bucket_name, object_key
    ), "Failed: Get Object"

    # Verifying Delete object is denied on both accounts
    for user in accounts:
        logger.info(
            f"Verifying whether S3:DeleteObject action is denied access for {user}"
        )
        try:
            helpers.s3_delete_object(user, obc_obj.bucket_name, object_key)
        except boto3exception.ClientError as e:
            logger.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error['Code'] == 'AccessDenied':
                logger.info(
                    f"DeleteObject failed due to: {response.error['Message']}"
                )
            else:
                raise UnexpectedBehaviour(
                    f"{e.response} received invalid error code {response.error['Code']}"
                )
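# Why both accounts get AccessDenied on DeleteObject above: in S3 policy evaluation an
# explicit Deny takes precedence over any Allow, so Statement-3 wins even though
# Statement-1 and Statement-2 grant other actions. After the two appends, the merged
# document handed to put_bucket_policy is expected to look roughly like the sketch
# below (statement bodies abbreviated; the exact output of gen_bucket_policy may differ):
EXAMPLE_MULTI_STATEMENT_POLICY = {
    "Statement": [
        {
            "Sid": "statement-1", "Effect": "Allow",
            "Principal": ["*"], "Action": "s3:GetObject",
        },
        {
            "Sid": "Statement-2", "Effect": "Allow",
            "Principal": "<noobaa-user>", "Action": "s3:PutObject",
        },
        {
            "Sid": "Statement-3", "Effect": "Deny",
            "Principal": ["<obc-account>", "<noobaa-user>"], "Action": "s3:DeleteObject",
        },
    ],
}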
def test_object_actions(self, mcg_obj, bucket_factory):
    """
    Test to verify different object actions and cross account access to buckets
    """
    data = "Sample string content to write to a new S3 object"
    object_key = "ObjKey-" + str(uuid.uuid4().hex)

    # Creating multiple obc users (accounts)
    obc = bucket_factory(amount=1, interface='OC')
    obc_obj = OBC(mcg_obj, obc=obc[0].name)

    # Admin sets policy on obc bucket with obc account principal
    bucket_policy_generated = gen_bucket_policy(
        user_list=obc_obj.obc_account,
        actions_list=['PutObject'],
        resources_list=[f'{obc_obj.bucket_name}/{"*"}']
    )
    bucket_policy = json.dumps(bucket_policy_generated)

    logger.info(
        f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}'
    )
    put_policy = helpers.put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)
    logger.info(f'Put bucket policy response from Admin: {put_policy}')

    # Get Policy
    logger.info(f'Getting Bucket policy on bucket: {obc_obj.bucket_name}')
    get_policy = helpers.get_bucket_policy(mcg_obj, obc_obj.bucket_name)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # Verifying whether obc account can put object
    logger.info(f'Adding object on bucket: {obc_obj.bucket_name}')
    assert helpers.s3_put_object(
        obc_obj, obc_obj.bucket_name, object_key, data
    ), "Failed: Put Object"

    # Verifying whether Get action is not allowed
    logger.info(
        f'Verifying whether user: {obc_obj.obc_account} is denied to Get object'
    )
    try:
        helpers.s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
    except boto3exception.ClientError as e:
        logger.info(e.response)
        response = HttpResponseParser(e.response)
        if response.error['Code'] == 'AccessDenied':
            logger.info('Get Object action has been denied access')
        else:
            raise UnexpectedBehaviour(
                f"{e.response} received invalid error code {response.error['Code']}"
            )

    # Verifying whether obc account is allowed to create multipart
    logger.info(
        f'Creating multipart on bucket: {obc_obj.bucket_name} with key: {object_key}'
    )
    helpers.create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

    # Verifying whether obc account is denied access to delete object
    logger.info(
        f'Verifying whether user: {obc_obj.obc_account} is denied to Delete object'
    )
    try:
        helpers.s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
    except boto3exception.ClientError as e:
        logger.info(e.response)
        response = HttpResponseParser(e.response)
        if response.error['Code'] == 'AccessDenied':
            logger.info('Delete action has been denied access')
        else:
            raise UnexpectedBehaviour(
                f"{e.response} received invalid error code {response.error['Code']}"
            )

    # Creating noobaa account to access bucket belonging to obc account
    user_name = "noobaa-user" + str(uuid.uuid4().hex)
    email = user_name + "@mail.com"
    user = NoobaaAccount(
        mcg_obj, name=user_name, email=email, buckets=[obc_obj.bucket_name]
    )

    # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
    new_policy_generated = gen_bucket_policy(
        user_list=user.email_id,
        actions_list=['GetObject', 'DeleteObject'],
        resources_list=[f'{obc_obj.bucket_name}/{"*"}']
    )
    new_policy = json.dumps(new_policy_generated)

    logger.info(
        f'Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {user.email_id}'
    )
    put_policy = helpers.put_bucket_policy(mcg_obj, obc_obj.bucket_name, new_policy)
    logger.info(f'Put bucket policy response from admin: {put_policy}')

    # Get Policy
    logger.info(f'Getting bucket policy on bucket: {obc_obj.bucket_name}')
    get_policy = helpers.get_bucket_policy(mcg_obj, obc_obj.bucket_name)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # Verifying whether Get, Delete object is allowed
    logger.info(
        f'Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
    )
    assert helpers.s3_get_object(
        user, obc_obj.bucket_name, object_key
    ), "Failed: Get Object"
    logger.info(
        f'Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}'
    )
    assert helpers.s3_delete_object(
        user, obc_obj.bucket_name, object_key
    ), "Failed: Delete Object"

    # Verifying whether Put object action is denied
    logger.info(
        f'Verifying whether user: {user.email_id} is denied to Put object after updating policy'
    )
    try:
        helpers.s3_put_object(user, obc_obj.bucket_name, object_key, data)
    except boto3exception.ClientError as e:
        logger.info(e.response)
        response = HttpResponseParser(e.response)
        if response.error['Code'] == 'AccessDenied':
            logger.info('Put object action has been denied access')
        else:
            raise UnexpectedBehaviour(
                f"{e.response} received invalid error code {response.error['Code']}"
            )
def test_public_website(self, mcg_obj, bucket_factory):
    """
    Tests public bucket website access
    """
    # Creating a S3 bucket to host website
    s3_bucket = bucket_factory(amount=1, interface="S3")

    # Creating random S3 users
    users = []
    account1 = "noobaa-user1" + str(uuid.uuid4().hex)
    account2 = "noobaa-user2" + str(uuid.uuid4().hex)
    for account in account1, account2:
        users.append(
            NoobaaAccount(
                mcg=mcg_obj,
                name=account,
                email=f"{account}@mail.com",
                buckets=[s3_bucket[0].name],
            )
        )

    logger.info(f"Adding bucket website config to: {s3_bucket[0].name}")
    assert s3_put_bucket_website(
        s3_obj=mcg_obj,
        bucketname=s3_bucket[0].name,
        website_config=website_config,
    ), "Failed: PutBucketWebsite"
    logger.info(f"Getting bucket website config from: {s3_bucket[0].name}")
    assert s3_get_bucket_website(
        s3_obj=mcg_obj, bucketname=s3_bucket[0].name
    ), "Failed: GetBucketWebsite"

    logger.info("Writing index and error data to the bucket")
    assert s3_put_object(
        s3_obj=mcg_obj,
        bucketname=s3_bucket[0].name,
        object_key="index.html",
        data=index,
        content_type="text/html",
    ), "Failed: PutObject"
    assert s3_put_object(
        s3_obj=mcg_obj,
        bucketname=s3_bucket[0].name,
        object_key="error.html",
        data=error,
        content_type="text/html",
    ), "Failed: PutObject"

    # Setting Get(read) policy action for all users (public)
    bucket_policy_generated = gen_bucket_policy(
        sid="PublicRead",
        user_list=["*"],
        actions_list=["GetObject"],
        resources_list=[f"{s3_bucket[0].name}/{'*'}"],
        effect="Allow",
    )
    bucket_policy = json.dumps(bucket_policy_generated)

    logger.info(
        f"Creating bucket policy on bucket: {s3_bucket[0].name} with public access"
    )
    assert put_bucket_policy(
        mcg_obj, s3_bucket[0].name, bucket_policy
    ), "Failed: PutBucketPolicy"

    # Getting Policy
    logger.info(f"Getting bucket policy for bucket: {s3_bucket[0].name}")
    get_policy = get_bucket_policy(mcg_obj, s3_bucket[0].name)
    logger.info(f"Bucket policy: {get_policy['Policy']}")

    # Verifying GetObject by reading the index of the website by anonymous users
    for user in users:
        logger.info(
            f"Getting object using user: {user.email_id} on bucket: {s3_bucket[0].name}"
        )
        assert s3_get_object(
            user, s3_bucket[0].name, "index.html"
        ), f"Failed: Get Object by user {user.email_id}"
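# The names website_config, index and error used above are assumed to be defined
# elsewhere in the module. A minimal sketch of plausible definitions (the real suite
# may use different content; the website_config keys follow the S3 PutBucketWebsite
# request schema):
EXAMPLE_WEBSITE_CONFIG = {
    "ErrorDocument": {"Key": "error.html"},
    "IndexDocument": {"Suffix": "index.html"},
}
EXAMPLE_INDEX = "<html><body><h1>Sample index page</h1></body></html>"
EXAMPLE_ERROR = "<html><body><h1>Sample error page</h1></body></html>"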
def test_object_actions(self, mcg_obj, bucket_factory):
    """
    Test to verify different object actions and cross account access to buckets
    """
    data = "Sample string content to write to a new S3 object"
    object_key = "ObjKey-" + str(uuid.uuid4().hex)

    # Creating multiple obc users (accounts)
    obc = bucket_factory(amount=1, interface="OC")
    obc_obj = OBC(obc[0].name)

    # Creating noobaa account to access bucket belonging to obc account
    user_name = "noobaa-user" + str(uuid.uuid4().hex)
    email = user_name + "@mail.com"
    user = NoobaaAccount(
        mcg_obj, name=user_name, email=email, buckets=[obc_obj.bucket_name]
    )

    # Admin sets policy on obc bucket with obc account principal
    bucket_policy_generated = gen_bucket_policy(
        user_list=[obc_obj.obc_account, user.email_id],
        actions_list=["PutObject"]
        if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
        else ["GetObject", "DeleteObject"],
        effect="Allow"
        if version.get_semantic_ocs_version_from_config() <= version.VERSION_4_6
        else "Deny",
        resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
    )
    bucket_policy = json.dumps(bucket_policy_generated)

    logger.info(
        f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {obc_obj.obc_account}"
    )
    put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy)
    logger.info(f"Put bucket policy response from Admin: {put_policy}")

    # Get Policy
    logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
    get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # Verifying whether users can put object
    logger.info(
        f"Adding object on bucket: {obc_obj.bucket_name} using user: {obc_obj.obc_account}"
    )
    assert s3_put_object(
        obc_obj, obc_obj.bucket_name, object_key, data
    ), "Failed: Put Object"

    logger.info(
        f"Adding object on bucket: {obc_obj.bucket_name} using user: {user.email_id}"
    )
    assert s3_put_object(
        user, obc_obj.bucket_name, object_key, data
    ), "Failed: Put Object"

    # Verifying whether Get action is not allowed
    logger.info(
        f"Verifying whether user: "
        f'{user.email_id if float(config.ENV_DATA["ocs_version"]) >= 4.6 else obc_obj.obc_account}'
        f" is denied to Get object"
    )
    try:
        if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
            s3_get_object(user, obc_obj.bucket_name, object_key)
        else:
            s3_get_object(obc_obj, obc_obj.bucket_name, object_key)
    except boto3exception.ClientError as e:
        logger.info(e.response)
        response = HttpResponseParser(e.response)
        if response.error["Code"] == "AccessDenied":
            logger.info("Get Object action has been denied access")
        else:
            raise UnexpectedBehaviour(
                f"{e.response} received invalid error code {response.error['Code']}"
            )
    else:
        assert False, "Get object succeeded when it should have failed"

    if version.get_semantic_ocs_version_from_config() == version.VERSION_4_6:
        logger.info(
            f"Verifying whether the user: {obc_obj.obc_account} is able to access Get action"
            f" irrespective of the policy set"
        )
        assert s3_get_object(
            obc_obj, obc_obj.bucket_name, object_key
        ), "Failed: Get Object"

    # Verifying whether obc account is allowed to create multipart
    logger.info(
        f"Creating multipart on bucket: {obc_obj.bucket_name}"
        f" with key: {object_key} using user: {obc_obj.obc_account}"
    )
    create_multipart_upload(obc_obj, obc_obj.bucket_name, object_key)

    # Verifying whether S3 user is allowed to create multipart
    logger.info(
        f"Creating multipart on bucket: {obc_obj.bucket_name} "
        f"with key: {object_key} using user: {user.email_id}"
    )
    create_multipart_upload(user, obc_obj.bucket_name, object_key)

    # Verifying whether obc account is denied access to delete object
    logger.info(
        f"Verifying whether user: "
        f'{user.email_id if float(config.ENV_DATA["ocs_version"]) >= 4.6 else obc_obj.obc_account}'
        f" is denied to Delete object"
    )
    try:
        if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6:
            s3_delete_object(user, obc_obj.bucket_name, object_key)
        else:
            s3_delete_object(obc_obj, obc_obj.bucket_name, object_key)
    except boto3exception.ClientError as e:
        logger.info(e.response)
        response = HttpResponseParser(e.response)
        if response.error["Code"] == "AccessDenied":
            logger.info("Delete action has been denied access")
        else:
            raise UnexpectedBehaviour(
                f"{e.response} received invalid error code {response.error['Code']}"
            )
    else:
        assert False, "Delete object succeeded when it should have failed"

    # Admin sets a policy on obc-account bucket with noobaa-account principal (cross account access)
    new_policy_generated = gen_bucket_policy(
        user_list=[user.email_id],
        actions_list=["GetObject", "DeleteObject"]
        if float(config.ENV_DATA["ocs_version"]) <= 4.6
        else ["PutObject"],
        effect="Allow"
        if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_6
        else "Deny",
        resources_list=[f'{obc_obj.bucket_name}/{"*"}'],
    )
    new_policy = json.dumps(new_policy_generated)

    logger.info(
        f"Creating bucket policy on bucket: {obc_obj.bucket_name} with principal: {user.email_id}"
    )
    put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, new_policy)
    logger.info(f"Put bucket policy response from admin: {put_policy}")

    # Get Policy
    logger.info(f"Getting bucket policy on bucket: {obc_obj.bucket_name}")
    get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
    logger.info(f"Got bucket policy: {get_policy['Policy']}")

    # Verifying whether Get, Delete object is allowed
    logger.info(
        f"Getting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
    )
    for get_resp in TimeoutSampler(
        30, 4, s3_get_object, user, obc_obj.bucket_name, object_key
    ):
        if "403" not in str(get_resp["ResponseMetadata"]["HTTPStatusCode"]):
            logger.info("GetObj operation successful")
            break
        else:
            logger.info("GetObj operation is denied access")

    logger.info(
        f"Deleting object on bucket: {obc_obj.bucket_name} with user: {user.email_id}"
    )
    for del_resp in TimeoutSampler(
        30, 4, s3_delete_object, user, obc_obj.bucket_name, object_key
    ):
        if "403" not in str(del_resp["ResponseMetadata"]["HTTPStatusCode"]):
            logger.info("DeleteObj operation successful")
            break
        else:
            logger.info("DeleteObj operation is denied access")

    # Verifying whether Put object action is denied
    logger.info(
        f"Verifying whether user: {user.email_id} is denied to Put object after updating policy"
    )
    try:
        s3_put_object(user, obc_obj.bucket_name, object_key, data)
    except boto3exception.ClientError as e:
        logger.info(e.response)
        response = HttpResponseParser(e.response)
        if response.error["Code"] == "AccessDenied":
            logger.info("Put object action has been denied access")
        else:
            raise UnexpectedBehaviour(
                f"{e.response} received invalid error code {response.error['Code']}"
            )