Example #1
    def test_bucket_delete_with_objects(
        self, mcg_obj, awscli_pod, bucket_factory, interface, bucketclass_dict
    ):
        """
        Negative test: deletion of a bucket that has objects stored in it.

        """
        bucket = bucket_factory(interface=interface, bucketclass=bucketclass_dict)[0]
        bucketname = bucket.name

        data_dir = "/data"
        full_object_path = f"s3://{bucketname}"
        retrieve_test_objects_to_pod(awscli_pod, data_dir)
        sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)

        logger.info(f"Deleting bucket: {bucketname}")
        if interface == "S3":
            try:
                s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                assert not s3_del, "Unexpected: S3 delete of a non-empty OBC succeeded"
            except botocore.exceptions.ClientError as err:
                assert "BucketNotEmpty" in str(
                    err
                ), "Couldn't verify delete non-empty OBC with s3"
                logger.info(f"Delete non-empty OBC {bucketname} failed as expected")
        # Deletion verification is performed internally as part of delete()
        bucket.delete()
        if bucketclass_dict:
            bucket.bucketclass.delete()
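The behaviour asserted in these deletion tests can be reproduced with plain boto3 outside the test framework. The sketch below is an illustration only; the endpoint, credentials, and bucket name are placeholders rather than values from the examples. Deleting a bucket that still contains objects is expected to raise a ClientError whose error code is BucketNotEmpty.

import boto3
import botocore.exceptions

# Placeholders: point these at any S3-compatible endpoint (MCG, RGW, AWS)
s3 = boto3.resource(
    "s3",
    endpoint_url="https://s3.example.com",
    aws_access_key_id="ACCESS_KEY",
    aws_secret_access_key="SECRET_KEY",
)

try:
    # Deleting a bucket that still holds objects should be rejected
    s3.Bucket("my-non-empty-bucket").delete()
    raise AssertionError("Delete of a non-empty bucket unexpectedly succeeded")
except botocore.exceptions.ClientError as err:
    assert err.response["Error"]["Code"] == "BucketNotEmpty"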
Example #2
    def test_bucket_delete_with_objects(self, mcg_obj, interface, awscli_pod):
        """
        Negative test: deletion of a bucket that has objects stored in it.
        """
        bucketname = create_unique_resource_name(
            resource_description="bucket", resource_type=interface.lower())
        bucket = BUCKET_MAP[interface.lower()](bucketname, mcg=mcg_obj)
        try:

            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = "/data"
            full_object_path = f"s3://{bucketname}"
            retrieve_test_objects_to_pod(awscli_pod, data_dir)
            sync_object_directory(awscli_pod, data_dir, full_object_path,
                                  mcg_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = mcg_obj.s3_resource.Bucket(bucketname).delete()
                    assert not s3_del, "Unexpected: S3 delete of a non-empty OBC succeeded"
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(
                        err), "Couldn't verify delete non-empty OBC with s3"
                    logger.info(
                        f"Delete non-empty OBC {bucketname} failed as expected"
                    )
        finally:
            bucket.delete()
Example #3
    def test_bucket_delete_with_objects(
        self, rgw_bucket_factory, interface, awscli_pod
    ):
        """
        Negative test: deletion of a bucket that has objects stored in it.
        """
        bucket = rgw_bucket_factory(1, interface)[0]
        bucketname = bucket.name
        obc_obj = OBC(bucketname)
        try:
            data_dir = "/data"
            full_object_path = f"s3://{bucketname}"
            retrieve_test_objects_to_pod(awscli_pod, data_dir)
            sync_object_directory(awscli_pod, data_dir, full_object_path, obc_obj)

            logger.info(f"Deleting bucket: {bucketname}")
            if interface == "S3":
                try:
                    s3_del = obc_obj.s3_resource.Bucket(bucketname).delete()
                    assert (
                        not s3_del
                    ), "Unexpected issue: Successfully deleted a bucket containing objects via S3"
                except botocore.exceptions.ClientError as err:
                    assert "BucketNotEmpty" in str(
                        err
                    ), "Couldn't verify delete non-empty OBC with s3"
                    logger.info(f"Delete non-empty OBC {bucketname} failed as expected")
        finally:
            bucket.delete()
Example #4
    def test_data_reduction_performance(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test data reduction performance
        """
        # TODO: Privatize test bucket
        download_dir = "/aws/downloaded"
        retrieve_test_objects_to_pod(awscli_pod, download_dir)
        bucket = bucket_factory(1)[0]
        bucketname = bucket.name
        full_object_path = f"s3://{bucketname}"
        sync_object_directory(awscli_pod, download_dir, full_object_path, mcg_obj)

        assert mcg_obj.check_data_reduction(
            bucketname, 100 * 1024 * 1024
        ), "Data reduction did not work as anticipated."
Example #5
    def test_check_object_integrity(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1)[0].name
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f'mkdir {result_dir}')
        # Retrieve a list of all objects on the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(
            awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path,
                              mcg_obj)

        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info('Downloading all objects from MCG bucket to awscli pod')
        sync_object_directory(awscli_pod, full_object_path, result_dir,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f'{original_dir}/{obj}',
                result_object_path=f'{result_dir}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
Example #6
    def test_check_object_integrity(
        self,
        mcg_obj,
        awscli_pod,
        bucket_factory,
        bucketclass_dict,
        test_directory_setup,
    ):
        """
        Test object integrity using md5sum
        """
        bucketname = bucket_factory(1, bucketclass=bucketclass_dict)[0].name
        original_dir = test_directory_setup.origin_dir
        result_dir = test_directory_setup.result_dir
        # Retrieve a list of all objects on the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path, mcg_obj)
        # Retrieve all objects from MCG bucket to result dir in Pod
        logger.info("Downloading all objects from MCG bucket to awscli pod")
        sync_object_directory(awscli_pod, full_object_path, result_dir, mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparison between original and result object failed"
Example #7
    def test_check_object_integrity(self, awscli_pod, rgw_bucket_factory):
        """
        Test object integrity using md5sum
        """
        bucketname = rgw_bucket_factory(1, "rgw-oc")[0].name
        obc_obj = OBC(bucketname)
        original_dir = "/original"
        result_dir = "/result"
        awscli_pod.exec_cmd_on_pod(command=f"mkdir {result_dir}")
        # Retrieve a list of all objects on the test-objects bucket and
        # download them to the pod
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, original_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, original_dir, full_object_path, obc_obj)

        logger.info("Downloading all objects from RGW bucket to awscli pod")
        sync_object_directory(awscli_pod, full_object_path, result_dir, obc_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_files:
            assert verify_s3_object_integrity(
                original_object_path=f"{original_dir}/{obj}",
                result_object_path=f"{result_dir}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparision between original and result object failed"
Example #8
    def test_write_file_to_bucket(
        self,
        mcg_obj,
        awscli_pod,
        bucket_class_factory,
        bucket_factory,
        interface,
        bucketclass_dict,
    ):
        """
        Test object IO using the S3 SDK
        """
        # Retrieve a list of all objects on the test-objects bucket and
        # download them to the pod
        data_dir = "/data"
        bucketname = bucket_factory(1,
                                    interface=interface,
                                    bucketclass=bucketclass_dict)[0].name
        full_object_path = f"s3://{bucketname}"
        downloaded_files = retrieve_test_objects_to_pod(awscli_pod, data_dir)
        # Write all downloaded objects to the new bucket
        sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)

        assert set(downloaded_files).issubset(
            obj.key
            for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname))
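The final assertion relies on the ocs-ci helper s3_list_all_objects_in_bucket. The same check can be expressed with plain boto3; the sketch below uses placeholder connection details and a placeholder file list rather than anything taken from the test above:

import boto3

# Placeholders only: endpoint, credentials, bucket, and file names are illustrative
s3 = boto3.resource(
    "s3",
    endpoint_url="https://s3.example.com",
    aws_access_key_id="ACCESS_KEY",
    aws_secret_access_key="SECRET_KEY",
)
downloaded_files = ["file1.dat", "file2.dat"]

# Collect every key in the bucket and confirm the uploads all arrived
uploaded_keys = {obj.key for obj in s3.Bucket("my-bucket").objects.all()}
assert set(downloaded_files).issubset(uploaded_keys)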
Example #9
    def test_data_reduction(self, mcg_obj, awscli_pod, bucket_factory):
        """
        Test data reduction mechanics

        """
        # TODO: Privatize test bucket
        download_dir = '/aws/downloaded'
        retrieve_test_objects_to_pod(awscli_pod, download_dir)
        bucket = bucket_factory(1)[0]
        bucketname = bucket.name
        full_object_path = f"s3://{bucketname}"
        sync_object_directory(awscli_pod, download_dir, full_object_path,
                              mcg_obj)

        assert mcg_obj.check_data_reduction(bucketname), (
            'Data reduction did not work as anticipated.')
Example #10
    def test_write_to_bucket_rbd_cephfs(self, verify_rgw_restart_count,
                                        setup_rbd_cephfs_pods, mcg_obj,
                                        awscli_pod, bucket_factory):
        """
        Test RGW restarts after running s3, rbd and cephfs IOs in parallel

        """
        bucketname = bucket_factory(1)[0].name
        full_object_path = f"s3://{bucketname}"
        target_dir = '/data/'
        retrieve_test_objects_to_pod(awscli_pod, target_dir)
        with ThreadPoolExecutor() as p:
            p.submit(pod_io, setup_rbd_cephfs_pods)
            p.submit(sync_object_directory, awscli_pod, target_dir,
                     full_object_path, mcg_obj)
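Note that ThreadPoolExecutor.submit takes the callable and its arguments separately; calling the helper inline would run it in the current thread and submit only its return value (the bug fixed above). A short sketch of the intended pattern, using the same names as the test and adding result() calls so worker exceptions propagate:

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor() as executor:
    # submit() schedules the callables; both workloads run concurrently
    io_future = executor.submit(pod_io, setup_rbd_cephfs_pods)
    sync_future = executor.submit(
        sync_object_directory, awscli_pod, target_dir, full_object_path, mcg_obj
    )
    # result() blocks until completion and re-raises any worker exception
    io_future.result()
    sync_future.result()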
Example #11
    def test_s3_bucket_delete_1t_objects(self, mcg_obj, awscli_pod):
        """
        Test deletion of a bucket that has 1TB of objects stored in it.
        """
        bucketname = create_unique_resource_name(
            resource_description='bucket', resource_type='s3'
        )
        try:
            bucket = MCGS3Bucket(bucketname, mcg_obj)
            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = '/data'
            retrieve_test_objects_to_pod(awscli_pod, data_dir)

            # Sync the downloaded objects dir to the new bucket under 3175
            # virtual dirs. With each dir around 315MB, 3175 dirs reach the
            # target of 1TB of data.
            logger.info('Writing objects to bucket')
            for i in range(3175):
                full_object_path = f"s3://{bucketname}/{i}/"
                sync_object_directory(
                    awscli_pod, data_dir, full_object_path, mcg_obj
                )

            # Delete the bucket content using 'aws s3 rm' with the --recursive
            # option. The object_versions.delete function does not work when
            # the number of objects exceeds 1000.
            start = timeit.default_timer()
            rm_object_recursive(awscli_pod, bucketname, mcg_obj)
            bucket.delete()
            stop = timeit.default_timer()
            gap = (stop - start) / 60
            if gap > 10:
                assert False, "Failed to delete s3 bucket within 10 minutes"
        finally:
            if mcg_obj.s3_verify_bucket_exists(bucketname):
                rm_object_recursive(awscli_pod, bucketname, mcg_obj)
                mcg_obj.s3_resource.Bucket(bucketname).delete()
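rm_object_recursive is another ocs-ci helper. As a rough sketch of what bulk deletion at this scale typically looks like, and purely as an assumption about its shape, the delete can be driven through the AWS CLI from the awscli pod (this presumes the pod already has credentials for the endpoint configured):

def rm_bucket_contents(awscli_pod, bucket_name, s3_obj):
    # Hypothetical helper: recursively remove every object in the bucket via
    # the AWS CLI, pointing at the MCG/RGW endpoint. Not the ocs-ci code.
    awscli_pod.exec_cmd_on_pod(
        command=(
            f"aws s3 rm s3://{bucket_name} --recursive "
            f"--endpoint-url {s3_obj.s3_endpoint}"
        )
    )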
Example #12
def test_fill_bucket(
    mcg_obj_session, awscli_pod_session, multiregion_mirror_setup_session
):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session

    mcg_bucket_path = f"s3://{bucket.name}"

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(command=f"mkdir {LOCAL_TESTOBJS_DIR_PATH}")
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(
        awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH
    )

    logger.info("Uploading all pod objects to MCG bucket")

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(
            awscli_pod_session,
            LOCAL_TESTOBJS_DIR_PATH,
            f"{mcg_bucket_path}/{i}/",
            mcg_obj_session,
        )

    mcg_obj_session.check_if_mirroring_is_done(bucket.name, timeout=420)
    bucket.verify_health()

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(
        awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH, mcg_obj_session
    )

    bucket.verify_health()

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert verify_s3_object_integrity(
                original_object_path=f"{LOCAL_TESTOBJS_DIR_PATH}/{obj}",
                result_object_path=f"{LOCAL_TEMP_PATH}/{i}/{obj}",
                awscli_pod=awscli_pod_session,
            ), "Checksum comparison between original and result object failed"
    bucket.verify_health()
Example #13
def test_fill_bucket(mcg_obj_session, awscli_pod_session,
                     multiregion_mirror_setup_session):
    """
    Test multi-region bucket creation using the S3 SDK. Fill the bucket for
    upgrade testing.
    """

    (bucket, created_backingstores) = multiregion_mirror_setup_session

    mcg_bucket_path = f's3://{bucket.name}'

    # Download test objects from the public bucket
    awscli_pod_session.exec_cmd_on_pod(
        command=f'mkdir {LOCAL_TESTOBJS_DIR_PATH}')
    DOWNLOADED_OBJS = retrieve_test_objects_to_pod(awscli_pod_session,
                                                   LOCAL_TESTOBJS_DIR_PATH)

    logger.info('Uploading all pod objects to MCG bucket')

    # Upload test objects to the NooBucket 3 times
    for i in range(3):
        sync_object_directory(awscli_pod_session, LOCAL_TESTOBJS_DIR_PATH,
                              f'{mcg_bucket_path}/{i}/', mcg_obj_session)

    mcg_obj_session.check_if_mirroring_is_done(bucket.name)
    assert bucket.status == constants.STATUS_BOUND

    # Retrieve all objects from MCG bucket to result dir in Pod
    sync_object_directory(awscli_pod_session, mcg_bucket_path, LOCAL_TEMP_PATH,
                          mcg_obj_session)

    assert bucket.status == constants.STATUS_BOUND

    # Checksum is compared between original and result object
    for obj in DOWNLOADED_OBJS:
        for i in range(3):
            assert verify_s3_object_integrity(
                original_object_path=f'{LOCAL_TESTOBJS_DIR_PATH}/{obj}',
                result_object_path=f'{LOCAL_TEMP_PATH}/{i}/{obj}',
                awscli_pod=awscli_pod_session
            ), 'Checksum comparison between original and result object failed'
    assert bucket.status == constants.STATUS_BOUND
Example #14
    def test_multiregion_mirror(self, mcg_obj_with_aws, awscli_pod,
                                multiregion_mirror_setup):
        """
        Test multi-region bucket creation using the S3 SDK
        """

        bucket, backingstore1, backingstore2 = multiregion_mirror_setup
        bucket_name = bucket.name

        # Download test objects from the public bucket
        downloaded_objs = retrieve_test_objects_to_pod(awscli_pod,
                                                       '/aws/original/')

        logger.info('Uploading all pod objects to MCG bucket')
        local_testobjs_dir_path = '/aws/original'
        local_temp_path = '/aws/temp'
        mcg_bucket_path = f's3://{bucket_name}'

        sync_object_directory(awscli_pod,
                              's3://' + constants.TEST_FILES_BUCKET,
                              local_testobjs_dir_path)

        # Upload test objects to the NooBucket
        sync_object_directory(awscli_pod, local_testobjs_dir_path,
                              mcg_bucket_path, mcg_obj_with_aws)

        mcg_obj_with_aws.check_if_mirroring_is_done(bucket_name)

        # Bring bucket A down
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore1['name'])
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_AUTH_FAILED)

        # Verify integrity of B
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj_with_aws)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'

        # Clean up the temp dir
        awscli_pod.exec_cmd_on_pod(
            command=f'sh -c \"rm -rf {local_temp_path}/*\"')

        # Bring B down, bring A up
        logger.info('Blocking bucket B')
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore2['name'])
        logger.info('Freeing bucket A')
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore1['name'],
                                                     block=False)
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore1['name'], BS_OPTIMAL)
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_AUTH_FAILED)

        # Verify integrity of A
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj_with_aws)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f'{local_testobjs_dir_path}/{obj}',
                result_object_path=f'{local_temp_path}/{obj}',
                awscli_pod=awscli_pod
            ), 'Checksum comparison between original and result object failed'
        # Bring B up
        mcg_obj_with_aws.toggle_aws_bucket_readwrite(backingstore2['name'],
                                                     block=False)
        mcg_obj_with_aws.check_backingstore_state(
            'backing-store-' + backingstore2['name'], BS_OPTIMAL)
Example #15
    def test_multiregion_mirror(self, cld_mgr, mcg_obj, awscli_pod,
                                multiregion_mirror_setup):
        """
        Test multi-region bucket creation using the S3 SDK
        """

        bucket, backingstores = multiregion_mirror_setup
        backingstore1 = backingstores[0]
        backingstore2 = backingstores[1]

        bucket_name = bucket.name
        aws_client = cld_mgr.aws_client

        # Download test objects from the public bucket
        downloaded_objs = retrieve_test_objects_to_pod(awscli_pod,
                                                       "/aws/original/")

        logger.info("Uploading all pod objects to MCG bucket")
        local_testobjs_dir_path = "/aws/original"
        local_temp_path = "/aws/temp"
        mcg_bucket_path = f"s3://{bucket_name}"

        sync_object_directory(awscli_pod,
                              "s3://" + constants.TEST_FILES_BUCKET,
                              local_testobjs_dir_path)

        # Upload test objects to the NooBucket
        sync_object_directory(awscli_pod, local_testobjs_dir_path,
                              mcg_bucket_path, mcg_obj)

        mcg_obj.check_if_mirroring_is_done(bucket_name)

        # Bring bucket A down
        aws_client.toggle_aws_bucket_readwrite(backingstore1.uls_name)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore1.name,
                                         BS_AUTH_FAILED)

        # Verify integrity of B
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f"{local_testobjs_dir_path}/{obj}",
                result_object_path=f"{local_temp_path}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparision between original and result object failed"

        # Clean up the temp dir
        awscli_pod.exec_cmd_on_pod(
            command=f'sh -c "rm -rf {local_temp_path}/*"')

        # Bring B down, bring A up
        logger.info("Blocking bucket B")
        aws_client.toggle_aws_bucket_readwrite(backingstore2.uls_name)
        logger.info("Freeing bucket A")
        aws_client.toggle_aws_bucket_readwrite(backingstore1.uls_name,
                                               block=False)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore1.name,
                                         BS_OPTIMAL)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore2.name,
                                         BS_AUTH_FAILED)

        # Verify integrity of A
        # Retrieve all objects from MCG bucket to result dir in Pod
        sync_object_directory(awscli_pod, mcg_bucket_path, local_temp_path,
                              mcg_obj)

        # Checksum is compared between original and result object
        for obj in downloaded_objs:
            assert verify_s3_object_integrity(
                original_object_path=f"{local_testobjs_dir_path}/{obj}",
                result_object_path=f"{local_temp_path}/{obj}",
                awscli_pod=awscli_pod,
            ), "Checksum comparision between original and result object failed"
        # Bring B up
        aws_client.toggle_aws_bucket_readwrite(backingstore2.uls_name,
                                               block=False)
        mcg_obj.check_backingstore_state("backing-store-" + backingstore2.name,
                                         BS_OPTIMAL)
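toggle_aws_bucket_readwrite simulates an outage of the AWS bucket backing a backing store. One plausible way such a toggle could be implemented, offered here only as an assumption and not as the ocs-ci code, is to attach or remove a bucket policy that denies object reads and writes via boto3:

import json

import boto3


def toggle_bucket_access(bucket_name, block=True):
    # Hypothetical sketch: deny object reads/writes on the backing bucket to
    # mimic an outage, or drop the policy to restore access. Credentials come
    # from the default boto3 credential chain.
    s3 = boto3.client("s3")
    if block:
        policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Deny",
                    "Principal": "*",
                    "Action": ["s3:GetObject", "s3:PutObject"],
                    "Resource": [f"arn:aws:s3:::{bucket_name}/*"],
                }
            ],
        }
        s3.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
    else:
        s3.delete_bucket_policy(Bucket=bucket_name)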