Example #1
    def test_s3_bucket_delete_1t_objects(self, mcg_obj, awscli_pod):
        """
        Test deletion of a bucket that has 1TB of objects stored in it.
        """
        bucketname = create_unique_resource_name(
            resource_description='bucket', resource_type='s3'
        )
        try:
            bucket = S3Bucket(mcg_obj, bucketname)
            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = '/data'
            helpers.retrieve_test_objects_to_pod(awscli_pod, data_dir)

            # Sync the downloaded objects directory to the new bucket under
            # 3175 virtual directories. Each directory holds around 315MB,
            # so 3175 directories reach the 1TB target.
            logger.info('Writing objects to bucket')
            for i in range(3175):
                full_object_path = f"s3://{bucketname}/{i}/"
                helpers.sync_object_directory(
                    awscli_pod, data_dir, full_object_path, mcg_obj
                )

            # Delete the bucket contents with `aws s3 rm --recursive`;
            # the object_versions.delete function does not work when the
            # object count exceeds 1000.
            start = timeit.default_timer()
            helpers.rm_object_recursive(awscli_pod, bucketname, mcg_obj)
            bucket.delete()
            stop = timeit.default_timer()
            # Minutes elapsed for the recursive delete
            gap = (stop - start) / 60
            assert gap <= 10, "Failed to delete s3 bucket within 10 minutes"
        finally:
            if mcg_obj.s3_verify_bucket_exists(bucketname):
                helpers.rm_object_recursive(awscli_pod, bucketname, mcg_obj)
                mcg_obj.s3_resource.Bucket(bucketname).delete()
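The comment in Example #1 points at a real S3 constraint: the DeleteObjects API accepts at most 1000 keys per request, which is why the test shells out to `aws s3 rm --recursive` instead of calling object_versions.delete. A minimal boto3 sketch of the same workaround; the helper name and its parameters are illustrative, not part of ocs-ci:

import boto3


def delete_bucket_recursive(bucket_name, endpoint_url, access_key, secret_key):
    """
    Delete all objects in a bucket in batches of 1000 keys, then delete
    the bucket itself. Batching is needed because S3's DeleteObjects
    API rejects requests carrying more than 1000 keys.
    """
    s3 = boto3.resource(
        's3',
        endpoint_url=endpoint_url,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    bucket = s3.Bucket(bucket_name)
    batch = []
    for obj in bucket.objects.all():
        batch.append({'Key': obj.key})
        if len(batch) == 1000:
            # Flush a full batch of 1000 keys in a single request
            bucket.delete_objects(Delete={'Objects': batch})
            batch = []
    if batch:
        # Flush the final, partially filled batch
        bucket.delete_objects(Delete={'Objects': batch})
    bucket.delete()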
Example #2
def measure_noobaa_exceed_bucket_quota(
    measurement_dir,
    request,
    mcg_obj,
    awscli_pod
):
    """
    Create a NooBaa bucket, set its capacity quota to 2GB and fill it
    with data.

    Returns:
        dict: Contains information about `start` and `stop` time for
        exceeding the bucket quota
    """
    bucket_name = create_unique_resource_name(
        resource_description='bucket',
        resource_type='s3'
    )
    bucket = S3Bucket(
        mcg_obj,
        bucket_name
    )
    # Set a 2GB capacity quota on the bucket via the NooBaa RPC API
    mcg_obj.send_rpc_query(
        'bucket_api',
        'update_bucket',
        {
            'name': bucket_name,
            'quota': {
                'unit': 'GIGABYTE',
                'size': 2
            }
        }
    )

    def teardown():
        """
        Delete test bucket.
        """
        bucket.delete()

    request.addfinalizer(teardown)

    def exceed_bucket_quota():
        """
        Upload five 500MB files into a bucket whose quota is set to 2GB.

        Returns:
            str: Name of utilized bucket
        """
        # Keep the bucket over quota for 11 minutes before returning
        run_time = 60 * 11
        # Create a single 500MB test file; it is uploaded five times below
        awscli_pod.exec_cmd_on_pod(
            'dd if=/dev/zero of=/tmp/testfile bs=1M count=500'
        )
        for i in range(1, 6):
            awscli_pod.exec_cmd_on_pod(
                helpers.craft_s3_command(
                    mcg_obj,
                    f"cp /tmp/testfile s3://{bucket_name}/testfile{i}"
                ),
                out_yaml_format=False,
                secrets=[
                    mcg_obj.access_key_id,
                    mcg_obj.access_key,
                    mcg_obj.s3_endpoint
                ]
            )

        logger.info(f"Waiting for {run_time} seconds")
        time.sleep(run_time)
        return bucket_name

    test_file = os.path.join(
        measurement_dir,
        'measure_noobaa_exceed_bucket_quota.json'
    )
    measured_op = measure_operation(exceed_bucket_quota, test_file)
    logger.info(f"Deleting data from bucket {bucket_name}")
    for i in range(1, 6):
        awscli_pod.exec_cmd_on_pod(
            helpers.craft_s3_command(
                mcg_obj,
                f"rm s3://{bucket_name}/testfile{i}"
            ),
            out_yaml_format=False,
            secrets=[
                mcg_obj.access_key_id,
                mcg_obj.access_key,
                mcg_obj.s3_endpoint
            ]
        )
    return measured_op
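The measure_operation helper is not shown in this excerpt. Below is a minimal sketch of the contract the fixture relies on, assuming the helper only needs to time the callable and persist the measured window to the given JSON file; the real ocs-ci helper may do more, such as reusing a previously saved measurement:

import json
import time


def measure_operation(operation, result_file):
    """
    Run `operation`, record wall-clock `start` and `stop` timestamps
    around it, persist the measurement to `result_file` as JSON and
    return it as a dict. Illustrative sketch only, not the ocs-ci
    implementation.
    """
    start = time.time()
    result = operation()
    stop = time.time()
    measurement = {'start': start, 'stop': stop, 'result': result}
    with open(result_file, 'w') as result_fd:
        json.dump(measurement, result_fd)
    return measurement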