    def test_bucket_delete_with_objects(self, mcg_obj, interface, awscli_pod):
        """
        Negative test: deleting a bucket that still has objects stored in it should fail.
        """
        bucket_map = {'s3': S3Bucket, 'oc': OCBucket, 'cli': CLIBucket}
        bucketname = create_unique_resource_name(
            resource_description='bucket', resource_type=interface.lower())
        try:
            bucket = bucket_map[interface.lower()](mcg_obj, bucketname)

            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = '/data'
            full_object_path = f"s3://{bucketname}"
            helpers.retrieve_test_objects_to_pod(awscli_pod, data_dir)
            helpers.sync_object_directory(awscli_pod, data_dir,
                                          full_object_path, mcg_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                    assert not s3_del, (
                        "Unexpectedly succeeded in deleting a non-empty OBC via s3")
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(err), (
                        "Could not verify that deleting a non-empty OBC via s3 fails")
                    logger.info(
                        f"Delete non-empty OBC {bucketname} failed as expected"
                    )
        finally:
            bucket.delete()
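# A minimal standalone sketch (assumption: plain boto3 against a generic S3 endpoint,
# not the ocs-ci helpers used above) of the error path this negative test relies on:
# deleting a bucket that still holds objects fails with a ClientError whose error code
# is 'BucketNotEmpty'.
import boto3
import botocore.exceptions


def delete_bucket_expect_not_empty(bucket_name, endpoint_url=None):
    """Try to delete a non-empty bucket and confirm S3 refuses with BucketNotEmpty."""
    s3 = boto3.resource('s3', endpoint_url=endpoint_url)
    try:
        s3.Bucket(bucket_name).delete()
    except botocore.exceptions.ClientError as err:
        return err.response['Error']['Code'] == 'BucketNotEmpty'
    return False  # the delete unexpectedly succeeded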
    def test_check_object_integrity(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1)[0].name
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {result_dir}')
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = helpers.retrieve_test_objects_to_pod(
            awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        helpers.sync_object_directory(awscli_pod, original_dir,
                                      full_object_path, mcg_obj)

        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info('Downloading all objects from MCG bucket to awscli pod')
        helpers.sync_object_directory(awscli_pod, full_object_path, result_dir,
                                      mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert mcg_obj.verify_s3_object_integrity(
                original_object_path=f'{original_dir}/{obj}',
                result_object_path=f'{result_dir}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
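# Hedged sketch: verify_s3_object_integrity is not shown in this snippet, but an
# md5-based comparison like the docstring above describes could look roughly like
# this. The helpers below and their names are illustrative assumptions, not the MCG API.
import hashlib


def md5_of_file(path, chunk_size=1024 * 1024):
    """Return the hex md5 digest of a local file, read in chunks."""
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()


def objects_match(original_path, result_path):
    """Compare the original and downloaded copies by md5 checksum."""
    return md5_of_file(original_path) == md5_of_file(result_path)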
    def test_empty_file_integrity(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test write empty files to bucket and check integrity
        """
        original_dir = '/data'
        result_dir = "/result"
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"

        # Create 100 empty files in the pod using touch
        awscli_pod.exec_cmd_on_pod(
            command=f'mkdir {original_dir} {result_dir}')
        command = "for i in $(seq 1 100); do touch /data/test$i; done"
        awscli_pod.exec_sh_cmd_on_pod(command=command, sh='sh')
        # Write all empty objects to the new bucket
        helpers.sync_object_directory(awscli_pod, original_dir,
                                      full_object_path, mcg_obj)

        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info('Downloading objects from MCG bucket to awscli pod')
        helpers.sync_object_directory(awscli_pod, full_object_path, result_dir,
                                      mcg_obj)

        # Checksum is compared between original and result object
        for obj in ('test' + str(i + 1) for i in range(100)):
            assert mcg_obj.verify_s3_object_integrity(
                original_object_path=f'{original_dir}/{obj}',
                result_object_path=f'{result_dir}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
def test_fill_bucket(
    mcg_obj_session,
    awscli_pod_session,
    multiregion_mirror_setup_session
):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (
        bucket,
        backingstore1,
        backingstore2
    ) = multiregion_mirror_setup_session
    mcg_bucket_path = f's3://{bucket.name}'

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(
        command=f'mkdir {LOCAL_TESTOBJS_DIR_PATH}'
    )
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(
        awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH
    )

    logger.info('Uploading all pod objects to MCG bucket')

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(
            awscli_pod_session,
            LOCAL_TESTOBJS_DIR_PATH,
            f'{mcg_bucket_path}/{i}/',
            mcg_obj_session
        )

    mcg_obj_session.check_if_mirroring_is_done(bucket.name)
    assert bucket.status == constants.STATUS_BOUND

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(
        awscli_pod_session,
        mcg_bucket_path,
        LOCAL_TEMP_PATH,
        mcg_obj_session
    )

    assert bucket.status == constants.STATUS_BOUND

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert mcg_obj_session.verify_s3_object_integrity(
                original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
                result_object_path=f'{LOCAL_TEMP_PATH}/{i}/{obj}',
                awscli_pod=awscli_pod_session
            ), 'Checksum comparison between original and result object failed'
    assert bucket.status == constants.STATUS_BOUND
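# Sketch under assumptions: sync_object_directory is not defined in this snippet; in
# spirit it runs `aws s3 sync <src> <dst>` inside the awscli pod against the MCG S3
# endpoint with the bucket's credentials. The pod.exec_cmd_on_pod call mirrors the
# usage above, but the exact helper signature here is illustrative only.
def sync_object_directory_sketch(pod, src, dst, endpoint, access_key, secret_key):
    """Sync a local directory and an s3:// path from inside the awscli pod."""
    cmd = (
        f'sh -c "AWS_ACCESS_KEY_ID={access_key} '
        f'AWS_SECRET_ACCESS_KEY={secret_key} '
        f'aws s3 --endpoint={endpoint} sync {src} {dst}"'
    )
    pod.exec_cmd_on_pod(command=cmd, out_yaml_format=False)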
    def test_check_multi_object_integrity(self, mcg_obj, awscli_pod,
                                          bucket_factory, amount, file_type):
        """
        Test write multiple files to bucket and check integrity
        """
        original_dir = "/original"
        result_dir = "/result"
        if file_type == 'large':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY
        elif file_type == 'small':
            public_bucket = constants.TEST_FILES_BUCKET
            obj_key = 'random1.txt'
        elif file_type == 'large_small':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY.rsplit('/', 1)[0]

        # Download the file to pod
        awscli_pod.exec_cmd_on_pod(
            command=f'mkdir {original_dir} {result_dir}')
        public_s3 = boto3.client('s3')
        download_files = []
        # Use obj_key as a prefix to download multiple files for the
        # large_small case; it also works with a single file
        for obj in public_s3.list_objects(Bucket=public_bucket,
                                          Prefix=obj_key).get('Contents'):
            # Skip the extra file in large file type
            if file_type == 'large' and obj["Key"] != obj_key:
                continue
            logger.info(
                f'Downloading {obj["Key"]} from AWS bucket {public_bucket}')
            command = f'wget -P {original_dir} '
            command += f'https://{public_bucket}.s3.amazonaws.com/{obj["Key"]}'
            awscli_pod.exec_cmd_on_pod(command=command)
            download_files.append(obj['Key'].split('/')[-1])

        # Write downloaded objects to the new bucket and check integrity
        bucketname = bucket_factory(1)[0].name
        base_path = f"s3://{bucketname}"
        for i in range(amount):
            full_object_path = base_path + f"/{i}/"
            helpers.sync_object_directory(awscli_pod, original_dir,
                                          full_object_path, mcg_obj)

            # Retrieve all objects from MCG bucket to result dir in Pod
            logger.info('Downloading objects from MCG bucket to awscli pod')
            helpers.sync_object_directory(awscli_pod, full_object_path,
                                          result_dir, mcg_obj)

            # Checksum is compared between original and result object
            for obj in download_files:
                assert mcg_obj.verify_s3_object_integrity(
                    original_object_path=f'{original_dir}/{obj}',
                    result_object_path=f'{result_dir}/{obj}',
                    awscli_pod=awscli_pod
                ), ('Checksum comparison between original and result object '
                    'failed')
    def test_write_multi_files_to_bucket(self, mcg_obj, awscli_pod,
                                         bucket_factory, amount, file_type):
        """
        Test write multiple files to bucket
        """
        data_dir = '/data'
        if file_type == 'large':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY
        elif file_type == 'small':
            public_bucket = constants.TEST_FILES_BUCKET
            obj_key = 'random1.txt'
        elif file_type == 'large_small':
            public_bucket = PUBLIC_BUCKET
            obj_key = LARGE_FILE_KEY.rsplit('/', 1)[0]

        # Download the file to pod
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {data_dir}')
        public_s3_client = retrieve_anon_s3_resource().meta.client
        download_files = []
        # Use obj_key as a prefix to download multiple files for the
        # large_small case; it also works with a single file
        for obj in public_s3_client.list_objects(
                Bucket=public_bucket, Prefix=obj_key).get('Contents'):
            # Skip the extra file in large file type
            if file_type == 'large' and obj["Key"] != obj_key:
                continue
            logger.info(
                f'Downloading {obj["Key"]} from AWS bucket {public_bucket}')
            download_obj_cmd = f'cp s3://{public_bucket}/{obj["Key"]} {data_dir}'
            awscli_pod.exec_cmd_on_pod(
                command=craft_s3_command(download_obj_cmd),
                out_yaml_format=False)
            download_files.append(obj['Key'])
        # Write all downloaded objects to the new bucket
        bucketname = bucket_factory(1)[0].name
        base_path = f"s3://{bucketname}"
        for i in range(amount):
            full_object_path = base_path + f"/{i}/"
            helpers.sync_object_directory(awscli_pod, data_dir,
                                          full_object_path, mcg_obj)

        obj_list = list(
            obj.key.split('/')[-1]
            for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname))

        # Check that the total number of copied files matches
        if file_type == 'large_small':
            assert len(obj_list) == 2 * amount, (
                "Total file amount does not match")
        else:
            assert len(obj_list) == amount, "Total file amount does not match"

        # Check that the deduplicated set of file names matches
        test_set = set([i.split('/')[-1] for i in download_files])
        assert test_set == set(obj_list), "File name set does not match"
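# Illustrative plain-boto3 equivalent of the list-and-compare step above: collect the
# base names of every object in a bucket and compare them as a set. The function name
# and resource wiring are placeholders; the MCG wrapper handles endpoint and credentials.
import boto3


def bucket_basename_set(bucket_name, endpoint_url=None):
    """Return the set of object base names currently stored in a bucket."""
    s3 = boto3.resource('s3', endpoint_url=endpoint_url)
    return {
        obj.key.split('/')[-1]
        for obj in s3.Bucket(bucket_name).objects.all()
    }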
def test_noobaa_postupgrade(
    mcg_obj_session,
    awscli_pod_session,
    multiregion_mirror_setup_session
):
    """
    Check bucket data and remove resources created in 'test_fill_bucket'.
    """

    (
        bucket,
        backingstore1,
        backingstore2
    ) = multiregion_mirror_setup_session
    mcg_bucket_path = f's3://{bucket.name}'

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        assert mcg_obj_session.verify_s3_object_integrity(
            original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
            result_object_path=f'{LOCAL_TEMP_PATH}/{obj}',
            awscli_pod=awscli_pod_session
        ), 'Checksum comparison between original and result object failed'

    assert bucket.status == constants.STATUS_BOUND

    # Clean up the temp dir
    awscli_pod_session.exec_cmd_on_pod(
        command=f'sh -c \"rm -rf {LOCAL_TEMP_PATH}/*\"'
    )

    mcg_obj_session.check_backingstore_state(
        'backing-store-' + backingstore1['name'],
        BS_OPTIMAL,
        timeout=360
    )
    mcg_obj_session.check_backingstore_state(
        'backing-store-' + backingstore2['name'],
        BS_OPTIMAL,
        timeout=360
    )

    assert bucket.status == constants.STATUS_BOUND

    # Verify integrity of A
    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(
        awscli_pod_session,
        mcg_bucket_path,
        LOCAL_TEMP_PATH,
        mcg_obj_session
    )
    assert bucket.status == constants.STATUS_BOUND
    def test_data_reduction(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test data reduction mechanics

        """
        # TODO: Privatize test bucket
        download_dir = '/aws/downloaded'
        helpers.retrieve_test_objects_to_pod(awscli_pod, download_dir)
        bucket = bucket_factory(1)[0]
        bucketname = bucket.name
        full_object_path = f"s3://{bucketname}"
        helpers.sync_object_directory(awscli_pod, download_dir,
                                      full_object_path, mcg_obj)

        assert mcg_obj.check_data_reduction(bucketname), (
            'Data reduction did not work as anticipated.')
    def test_multipart_upload_operations(self, mcg_obj, awscli_pod,
                                         bucket_factory):
        """
        Test multipart upload operations on a bucket and verify the integrity
        of the downloaded object
        """
        bucket, key, origin_dir, res_dir, object_path, parts = setup(
            awscli_pod, bucket_factory)

        # Abort all Multipart Uploads for this Bucket (optional, for starting over)
        logger.info(f'Aborting any Multipart Upload on bucket:{bucket}')
        mcg_obj.abort_multipart_upload(bucket, key)

        # Create & list Multipart Upload on the Bucket
        logger.info(
            f'Initiating Multipart Upload on Bucket: {bucket} with Key {key}')
        upload_id = mcg_obj.create_multipart_upload(bucket, key)
        logger.info(
            f'Listing the Multipart Upload : {mcg_obj.list_multipart_upload(bucket)}'
        )

        # Uploading individual parts to the Bucket
        logger.info(f'Uploading individual parts to the bucket {bucket}')
        uploaded_parts = helpers.upload_parts(mcg_obj, awscli_pod, bucket, key,
                                              res_dir, upload_id, parts)

        # Listing the Uploaded parts
        logger.info(
            f'Listing the individual parts : {mcg_obj.list_uploaded_parts(bucket, key, upload_id)}'
        )

        # Completing the Multipart Upload
        logger.info(f'Completing the Multipart Upload on bucket: {bucket}')
        logger.info(
            mcg_obj.complete_multipart_upload(bucket, key, upload_id,
                                              uploaded_parts))

        # Checksum validation: download the object after completing the
        # multipart upload and verify its integrity
        logger.info(
            'Downloading the completed multipart object from MCG bucket to awscli pod'
        )
        helpers.sync_object_directory(awscli_pod, object_path, res_dir,
                                      mcg_obj)
        assert mcg_obj.verify_s3_object_integrity(
            original_object_path=f'{origin_dir}/{key}',
            result_object_path=f'{res_dir}/{key}',
            awscli_pod=awscli_pod
        ), 'Checksum comparison between original and result object failed'
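# For reference, a minimal plain-boto3 sketch of the multipart flow the MCG wrappers
# above drive: create the upload, send the parts, then complete it with the collected
# ETags. Bucket, key and part contents are placeholders; the test's helpers add
# endpoint handling and in-pod execution on top of this.
import boto3


def multipart_upload_sketch(bucket, key, chunks, endpoint_url=None):
    """Upload an object in parts using the raw S3 multipart API."""
    s3 = boto3.client('s3', endpoint_url=endpoint_url)
    upload_id = s3.create_multipart_upload(Bucket=bucket, Key=key)['UploadId']
    parts = []
    for part_number, body in enumerate(chunks, start=1):
        resp = s3.upload_part(
            Bucket=bucket, Key=key, PartNumber=part_number,
            UploadId=upload_id, Body=body
        )
        parts.append({'ETag': resp['ETag'], 'PartNumber': part_number})
    s3.complete_multipart_upload(
        Bucket=bucket, Key=key, UploadId=upload_id,
        MultipartUpload={'Parts': parts}
    )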
    def test_write_file_to_bucket(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test object IO using the S3 SDK
        """
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        data_dir = '/data'
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"
        downloaded_files = helpers.retrieve_test_objects_to_pod(
            awscli_pod, data_dir)
        # Write all downloaded objects to the new bucket
        helpers.sync_object_directory(awscli_pod, data_dir, full_object_path,
                                      mcg_obj)

        assert set(downloaded_files).issubset(
            obj.key
            for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname))
    def test_write_empty_file_to_bucket(self, mcg_obj, awscli_pod,
                                        bucket_factory):
        """
        Test write empty files to bucket
        """
        data_dir = '/data'
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"

        # Create 1000 empty files in the pod using touch
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {data_dir}')
        command = "for i in $(seq 1 1000); do touch /data/test$i; done"
        awscli_pod.exec_sh_cmd_on_pod(command=command, sh='sh')
        # Write all empty objects to the new bucket
        helpers.sync_object_directory(awscli_pod, data_dir, full_object_path,
                                      mcg_obj)

        obj_set = set(
            obj.key
            for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname))
        test_set = set('test' + str(i + 1) for i in range(1000))
        assert test_set == obj_set, "File name set does not match"
def test_fill_bucket(mcg_obj_session, awscli_pod_session,
                     multiregion_mirror_setup_session):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (bucket, backingstore1, backingstore2) = multiregion_mirror_setup_session
    mcg_bucket_path = f's3://{bucket.name}'

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(
        command=f'mkdir {LOCAL_TESTOBJS_DIR_PATH}')
    test_objects = boto3.resource('s3').Bucket(
        constants.TEST_FILES_BUCKET).objects.all()

    for obj in test_objects:
        logger.info(f'Downloading {obj.key} from AWS test bucket')
        awscli_pod_session.exec_cmd_on_pod(command=(
            f'sh -c "wget -P {LOCAL_TESTOBJS_DIR_PATH} '
            f'https://{constants.TEST_FILES_BUCKET}.s3.amazonaws.com/{obj.key}"'
        ))
        DOWNLOADED_OBJS.append(f'{obj.key}')
        # Create two extra copies of each object so the pod holds 3x the test objects
        for i in range(2):
            awscli_pod_session.exec_cmd_on_pod(
                command=f'sh -c "'
                f'cp {LOCAL_TESTOBJS_DIR_PATH}/{obj.key} '
                f'{LOCAL_TESTOBJS_DIR_PATH}/{obj.key}.{i}"')
            DOWNLOADED_OBJS.append(f'{obj.key}.{i}')

    logger.info('Uploading all pod objects to MCG bucket')

    sync_object_directory(awscli_pod_session,
                          's3://' + constants.TEST_FILES_BUCKET,
                          LOCAL_TESTOBJS_DIR_PATH)

    # Upload test objects to the NooBucket
    sync_object_directory(awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH,
                          mcg_bucket_path, mcg_obj_session)

    mcg_obj_session.check_if_mirroring_is_done(bucket.name)
    assert bucket.status == constants.STATUS_BOUND

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)

    assert bucket.status == constants.STATUS_BOUND

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        assert mcg_obj_session.verify_s3_object_integrity(
            original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
            result_object_path=f'{LOCAL_TEMP_PATH}/{obj}',
            awscli_pod=awscli_pod_session
        ), 'Checksum comparison between original and result object failed'
    assert bucket.status == constants.STATUS_BOUND
    def test_s3_bucket_delete_1t_objects(self, mcg_obj, awscli_pod):
        """
        Test deletion of a bucket that has ~1TB of objects stored in it.
        """
        bucketname = create_unique_resource_name(
            resource_description='bucket', resource_type='s3'
        )
        try:
            bucket = S3Bucket(mcg_obj, bucketname)
            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = '/data'
            helpers.retrieve_test_objects_to_pod(awscli_pod, data_dir)

            # Sync the downloaded objects dir to the new bucket under 3175
            # virtual dirs. With each dir around 315MB, 3175 dirs reach the
            # target of ~1TB of data.
            logger.info('Writing objects to bucket')
            for i in range(3175):
                full_object_path = f"s3://{bucketname}/{i}/"
                helpers.sync_object_directory(
                    awscli_pod, data_dir, full_object_path, mcg_obj
                )

            # Delete the bucket content using 'aws s3 rm' with the --recursive
            # option. The object_versions.delete function does not work when
            # the object count exceeds 1000.
            start = timeit.default_timer()
            helpers.rm_object_recursive(awscli_pod, bucketname, mcg_obj)
            bucket.delete()
            stop = timeit.default_timer()
            gap = (stop - start) / 60
            assert gap <= 10, "Failed to delete s3 bucket within 10 minutes"
        finally:
            if mcg_obj.s3_verify_bucket_exists(bucketname):
                helpers.rm_object_recursive(awscli_pod, bucketname, mcg_obj)
                mcg_obj.s3_resource.Bucket(bucketname).delete()
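# Hedged sketch of what rm_object_recursive presumably does: run `aws s3 rm --recursive`
# on the bucket from inside the awscli pod, which avoids the 1000-object limitation the
# comment above mentions. The exact helper signature below is an assumption for
# illustration only.
def rm_object_recursive_sketch(pod, bucket_name, endpoint, access_key, secret_key):
    """Recursively delete every object in a bucket via the AWS CLI."""
    cmd = (
        f'sh -c "AWS_ACCESS_KEY_ID={access_key} '
        f'AWS_SECRET_ACCESS_KEY={secret_key} '
        f'aws s3 --endpoint={endpoint} rm s3://{bucket_name} --recursive"'
    )
    pod.exec_cmd_on_pod(command=cmd, out_yaml_format=False)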
    def test_write_to_bucket_rbd_cephfs(self, verify_rgw_restart_count,
                                        setup_rbd_cephfs_pods, mcg_obj,
                                        awscli_pod, bucket_factory):
        """
        Test RGW restarts after running s3, rbd and cephfs IOs in parallel

        """
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"
        target_dir = '/data/'
        helpers.retrieve_test_objects_to_pod(awscli_pod, target_dir)
        with ThreadPoolExecutor() as p:
            p.submit(pod_io, setup_rbd_cephfs_pods)
            # Pass the callable and its arguments so the sync runs in the
            # executor instead of being evaluated inline
            p.submit(
                helpers.sync_object_directory, awscli_pod, target_dir,
                full_object_path, mcg_obj)
    def test_multiregion_mirror(self, mcg_obj, awscli_pod,
                                multiregion_mirror_setup):
        """
        Test multi-region bucket creation using the S3 SDK
        """

        bucket, backingstore1, backingstore2 = multiregion_mirror_setup
        bucket_name = bucket.name

        # Download test objects from the public bucket
        downloaded_objs = retrieve_test_objects_to_pod(awscli_pod,
                                                       '/aws/original/')

        logger.info('Uploading all pod objects to MCG bucket')
        local_testobjs_dir_path = '/aws/original'
        local_temp_path = '/aws/temp'
        mcg_bucket_path = f's3://{bucket_name}'

        sync_object_directory(awscli_pod,
                              's3://' + constants.TEST_FILES_BUCKET,
                              local_testobjs_dir_path)

        # Upload test objects to the NooBucket
        sync_object_directory(awscli_pod, local_testobjs_dir_path,
                              mcg_bucket_path, mcg_obj)

        mcg_obj.check_if_mirroring_is_done(bucket_name)

        # Bring bucket A down
        mcg_obj.toggle_aws_bucket_readwrite(backingstore1['name'])
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_AUTH_FAILED)

        # Verify integrity of B
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert mcg_obj.verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'

        # Clean up the temp dir
        awscli_pod.exec_cmd_on_pod(
            command=f'sh -c \"rm -rf {local_temp_path}/*\"')

        # Bring B down, bring A up
        logger.info('Blocking bucket B')
        mcg_obj.toggle_aws_bucket_readwrite(backingstore2['name'])
        logger.info('Freeing bucket A')
        mcg_obj.toggle_aws_bucket_readwrite(backingstore1['name'], block=False)
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_OPTIMAL)
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_AUTH_FAILED)

        # Verify integrity of A
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert mcg_obj.verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
        # Bring B up
        mcg_obj.toggle_aws_bucket_readwrite(backingstore2['name'], block=False)
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_OPTIMAL)