def test_bucket_delete_with_objects(self, mcg_obj, interface, awscli_pod):
        """
        Negative test: delete a bucket that still has objects stored in it.
        """
        bucket_map = {'s3': S3Bucket, 'oc': OCBucket, 'cli': CLIBucket}
        bucketname = create_unique_resource_name(
            resource_description='bucket', resource_type=interface.lower())
        try:
            bucket = bucket_map[interface.lower()](mcg_obj, bucketname)

            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = '/data'
            full_object_path = f"s3://{bucketname}"
            helpers.retrieve_test_objects_to_pod(awscli_pod, data_dir)
            helpers.sync_object_directory(awscli_pod, data_dir,
                                          full_object_path, mcg_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                    assert not s3_del, (
                        "Unexpectedly succeeded in deleting a non-empty "
                        "OBC via S3")
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(err), (
                        "Couldn't verify delete non-empty OBC with s3")
                    logger.info(
                        f"Delete non-empty OBC {bucketname} failed as expected"
                    )
        finally:
            bucket.delete()
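
# A minimal standalone sketch of the behaviour the negative test above relies
# on: boto3 refuses to delete a non-empty bucket and raises a ClientError
# whose error code is "BucketNotEmpty". The endpoint and credentials below
# are placeholders, not values from this suite.
import boto3
import botocore.exceptions

def delete_expecting_bucket_not_empty(endpoint, key_id, secret, bucket_name):
    s3 = boto3.resource(
        's3', endpoint_url=endpoint,
        aws_access_key_id=key_id, aws_secret_access_key=secret
    )
    try:
        s3.Bucket(bucket_name).delete()
    except botocore.exceptions.ClientError as err:
        # The error code is the reliable way to identify this failure
        return err.response['Error']['Code'] == 'BucketNotEmpty'
    # Deletion succeeding on a non-empty bucket is the unexpected case
    return False
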
    def test_write_to_bucket_rbd_cephfs(self, verify_rgw_restart_count,
                                        setup_rbd_cephfs_pods, mcg_obj,
                                        awscli_pod, bucket_factory):
        """
        Verify the RGW restart count after running s3, rbd and cephfs IOs
        in parallel

        """
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"
        target_dir = '/data/'
        helpers.retrieve_test_objects_to_pod(awscli_pod, target_dir)
        with ThreadPoolExecutor() as p:
            p.submit(pod_io, setup_rbd_cephfs_pods)
            # Pass the callable and its arguments separately so the sync runs
            # in the worker thread instead of being executed immediately here
            p.submit(helpers.sync_object_directory, awscli_pod, target_dir,
                     full_object_path, mcg_obj)
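
# helpers.sync_object_directory is used throughout these tests. A plausible
# minimal sketch (not the actual ocs-ci implementation) is a thin wrapper
# around 'aws s3 sync' executed inside the awscli pod; the mcg_obj attribute
# names used for the credentials below are assumptions for illustration only.
def sync_object_directory_sketch(awscli_pod, src, target, mcg_obj=None):
    endpoint_opt = f'--endpoint-url {mcg_obj.s3_endpoint}' if mcg_obj else ''
    creds = (
        f'AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} '
        f'AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} '
    ) if mcg_obj else ''
    # Run the sync inside the pod so traffic goes through the cluster network
    awscli_pod.exec_cmd_on_pod(
        command=f'sh -c "{creds}aws s3 sync {src} {target} {endpoint_opt}"'
    )
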
Example #3
    def test_data_reduction(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test data reduction mechanics

        """
        # TODO: Privatize test bucket
        download_dir = '/aws/downloaded'
        helpers.retrieve_test_objects_to_pod(awscli_pod, download_dir)
        bucket = bucket_factory(1)[0]
        bucketname = bucket.name
        full_object_path = f"s3://{bucketname}"
        helpers.sync_object_directory(awscli_pod, download_dir,
                                      full_object_path, mcg_obj)

        assert mcg_obj.check_data_reduction(bucketname), (
            'Data reduction did not work as anticipated.')
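
# mcg_obj.check_data_reduction presumably waits until deduplication and
# compression bring the bucket's physical usage below its logical size. A
# generic polling sketch follows; get_bucket_reduction_stats is a
# hypothetical callable standing in for whatever interface reports the sizes.
import time

def wait_for_data_reduction(get_bucket_reduction_stats, bucket_name,
                            timeout=600, interval=30):
    deadline = time.time() + timeout
    while time.time() < deadline:
        logical, physical = get_bucket_reduction_stats(bucket_name)
        if physical < logical:
            # Reduction has kicked in once physical usage drops below logical
            return True
        time.sleep(interval)
    return False
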
    def test_check_object_integrity(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1)[0].name
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {result_dir}')
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = helpers.retrieve_test_objects_to_pod(
            awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        helpers.sync_object_directory(awscli_pod, original_dir,
                                      full_object_path, mcg_obj)

        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info('Downloading all objects from MCG bucket to awscli pod')
        helpers.sync_object_directory(awscli_pod, full_object_path, result_dir,
                                      mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert mcg_obj.verify_s3_object_integrity(
                original_object_path=f'{original_dir}/{obj}',
                result_object_path=f'{result_dir}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
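
# mcg_obj.verify_s3_object_integrity compares the original and re-downloaded
# copies. A plausible sketch (an assumption, not the ocs-ci implementation)
# runs md5sum inside the awscli pod and compares the digests of both paths.
def verify_object_integrity_sketch(awscli_pod, original_path, result_path):
    output = awscli_pod.exec_cmd_on_pod(
        command=f'md5sum {original_path} {result_path}'
    )
    # md5sum prints '<digest>  <path>' per file; compare the two digests
    digests = [
        line.split()[0] for line in str(output).splitlines() if line.strip()
    ]
    return len(digests) == 2 and digests[0] == digests[1]
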
Example #5
def test_fill_bucket(
    mcg_obj_session,
    awscli_pod_session,
    multiregion_mirror_setup_session
):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (
        bucket,
        backingstore1,
        backingstore2
    ) = multiregion_mirror_setup_session
    mcg_bucket_path = f's3://{bucket.name}'

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(
        command=f'mkdir {LOCAL_TESTOBJS_DIR_PATH}'
    )
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(
        awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH
    )

    logger.info('Uploading all pod objects to MCG bucket')

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(
            awscli_pod_session,
            LOCAL_TESTOBJS_DIR_PATH,
            f'{mcg_bucket_path}/{i}/',
            mcg_obj_session
        )

    mcg_obj_session.check_if_mirroring_is_done(bucket.name)
    assert bucket.status == constants.STATUS_BOUND

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(
        awscli_pod_session,
        mcg_bucket_path,
        LOCAL_TEMP_PATH,
        mcg_obj_session
    )

    assert bucket.status == constants.STATUS_BOUND

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert mcg_obj_session.verify_s3_object_integrity(
                original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
                result_object_path=f'{LOCAL_TEMP_PATH}/{i}/{obj}',
                awscli_pod=awscli_pod_session
            ), 'Checksum comparison between original and result object failed'
    assert bucket.status == constants.STATUS_BOUND
Example #6
    def test_s3_bucket_delete_1t_objects(self, mcg_obj, awscli_pod):
        """
        Test deletion of a bucket that has 1TB of objects stored in it.
        """
        bucketname = create_unique_resource_name(
            resource_description='bucket', resource_type='s3'
        )
        try:
            bucket = S3Bucket(mcg_obj, bucketname)
            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = '/data'
            helpers.retrieve_test_objects_to_pod(awscli_pod, data_dir)

            # Sync the downloaded objects dir to the new bucket under 3175
            # virtual dirs. With each dir around 315MB, 3175 dirs reach the
            # target of 1TB of data.
            logger.info('Writing objects to bucket')
            for i in range(3175):
                full_object_path = f"s3://{bucketname}/{i}/"
                helpers.sync_object_directory(
                    awscli_pod, data_dir, full_object_path, mcg_obj
                )

            # Delete the bucket content using 'aws s3 rm' with the --recursive
            # option. The object_versions.delete function does not work when
            # the number of objects exceeds 1000.
            start = timeit.default_timer()
            helpers.rm_object_recursive(awscli_pod, bucketname, mcg_obj)
            bucket.delete()
            stop = timeit.default_timer()
            gap = (stop - start) / 60
            assert gap <= 10, "Failed to delete s3 bucket within 10 minutes"
        finally:
            if mcg_obj.s3_verify_bucket_exists(bucketname):
                helpers.rm_object_recursive(awscli_pod, bucketname, mcg_obj)
                mcg_obj.s3_resource.Bucket(bucketname).delete()
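
# helpers.rm_object_recursive presumably wraps 'aws s3 rm --recursive', which
# pages through the bucket and so is not limited to 1000 keys per call. A
# hypothetical sketch; the mcg_obj credential attribute names are assumptions.
def rm_object_recursive_sketch(awscli_pod, bucket_name, mcg_obj):
    awscli_pod.exec_cmd_on_pod(
        command=(
            f'sh -c "AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} '
            f'AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} '
            f'aws s3 rm s3://{bucket_name} --recursive '
            f'--endpoint-url {mcg_obj.s3_endpoint}"'
        )
    )
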
    def test_write_file_to_bucket(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test object IO using the S3 SDK
        """
        # Retrieve a list of all objects in the test-objects bucket and
        # download them to the pod
        data_dir = '/data'
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"
        downloaded_files = helpers.retrieve_test_objects_to_pod(
            awscli_pod, data_dir)
        # Write all downloaded objects to the new bucket
        helpers.sync_object_directory(awscli_pod, data_dir, full_object_path,
                                      mcg_obj)

        assert set(downloaded_files).issubset(
            obj.key
            for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname))
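
# mcg_obj.s3_list_all_objects_in_bucket presumably pages through the bucket
# listing. With a boto3 resource, the collection below pages transparently;
# the endpoint and credentials are placeholders.
import boto3

def list_all_objects_sketch(endpoint, key_id, secret, bucket_name):
    s3 = boto3.resource(
        's3', endpoint_url=endpoint,
        aws_access_key_id=key_id, aws_secret_access_key=secret
    )
    # objects.all() issues as many ListObjects calls as needed under the hood
    return list(s3.Bucket(bucket_name).objects.all())
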
Example #8
    def test_multiregion_mirror(self, mcg_obj, awscli_pod,
                                multiregion_mirror_setup):
        """
        Test multi-region bucket creation using the S3 SDK
        """

        bucket, backingstore1, backingstore2 = multiregion_mirror_setup
        bucket_name = bucket.name

        # Download test objects from the public bucket
        downloaded_objs = retrieve_test_objects_to_pod(awscli_pod,
                                                       '/aws/original/')

        logger.info('Uploading all pod objects to MCG bucket')
        local_testobjs_dir_path = '/aws/original'
        local_temp_path = '/aws/temp'
        mcg_bucket_path = f's3://{bucket_name}'

        sync_object_directory(awscli_pod,
                              's3://' + constants.TEST_FILES_BUCKET,
                              local_testobjs_dir_path)

        # Upload test objects to the NooBucket
        sync_object_directory(awscli_pod, local_testobjs_dir_path,
                              mcg_bucket_path, mcg_obj)

        mcg_obj.check_if_mirroring_is_done(bucket_name)

        # Bring bucket A down
        mcg_obj.toggle_aws_bucket_readwrite(backingstore1['name'])
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_AUTH_FAILED)

        # Verify integrity of B
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert mcg_obj.verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'

        # Clean up the temp dir
        awscli_pod.exec_cmd_on_pod(
            command=f'sh -c \"rm -rf {local_temp_path}/*\"')

        # Bring B down, bring A up
        logger.info('Blocking bucket B')
        mcg_obj.toggle_aws_bucket_readwrite(backingstore2['name'])
        logger.info('Freeing bucket A')
        mcg_obj.toggle_aws_bucket_readwrite(backingstore1['name'], block=False)
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_OPTIMAL)
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_AUTH_FAILED)

        # Verify integrity of A
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert mcg_obj.verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
        # Bring B up
        mcg_obj.toggle_aws_bucket_readwrite(backingstore2['name'], block=False)
        mcg_obj.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_OPTIMAL)
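
# mcg_obj.toggle_aws_bucket_readwrite presumably blocks or restores access to
# the underlying AWS bucket. One plausible mechanism (an assumption, not the
# ocs-ci implementation) is attaching or removing a deny-all bucket policy.
import json

def toggle_bucket_readwrite_sketch(aws_s3_client, bucket_name, block=True):
    if block:
        # Deny reads and writes on every object in the backingstore bucket
        policy = {
            'Version': '2012-10-17',
            'Statement': [{
                'Effect': 'Deny',
                'Principal': '*',
                'Action': ['s3:GetObject', 's3:PutObject'],
                'Resource': f'arn:aws:s3:::{bucket_name}/*',
            }],
        }
        aws_s3_client.put_bucket_policy(
            Bucket=bucket_name, Policy=json.dumps(policy)
        )
    else:
        # Removing the policy restores normal access
        aws_s3_client.delete_bucket_policy(Bucket=bucket_name)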