Example 1
    def test_s3_bucket_delete_1t_objects(self, mcg_obj, awscli_pod_session):
        """
        Test deletion of a bucket that has 1TB of objects stored in it.
        """
        bucketname = create_unique_resource_name(resource_description="bucket",
                                                 resource_type="s3")
        try:
            bucket = MCGS3Bucket(bucketname, mcg_obj)
            logger.info(f"aws s3 endpoint is {mcg_obj.s3_endpoint}")
            logger.info(f"aws region is {mcg_obj.region}")
            data_dir = AWSCLI_TEST_OBJ_DIR

            # Sync the downloaded objects dir to the new bucket across 3175
            # virtual dirs. With each dir around 315MB, the 3175 dirs will
            # reach the 1TB data target.
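            # (3175 dirs * 315 MB/dir = 1,000,125 MB, i.e. just over 1 TB.)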
            logger.info("Writing objects to bucket")
            for i in range(3175):
                full_object_path = f"s3://{bucketname}/{i}/"
                sync_object_directory(awscli_pod_session, data_dir,
                                      full_object_path, mcg_obj)

            # Delete the bucket content using `aws s3 rm` with the --recursive
            # option. The object_versions.delete function does not work when
            # the number of objects exceeds 1000.
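            # (The underlying S3 DeleteObjects API accepts at most 1000 keys
            # per request, which is presumably why the batch delete fails.)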
            start = timeit.default_timer()
            rm_object_recursive(awscli_pod_session, bucketname, mcg_obj)
            bucket.delete()
            stop = timeit.default_timer()
            elapsed_minutes = (stop - start) / 60
            assert elapsed_minutes <= 10, (
                "Failed to delete s3 bucket within 10 minutes"
            )
        finally:
            if mcg_obj.s3_verify_bucket_exists(bucketname):
                rm_object_recursive(awscli_pod_session, bucketname, mcg_obj)
                mcg_obj.s3_resource.Bucket(bucketname).delete()
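
For comparison, here is a minimal standalone sketch of the same timed recursive-delete pattern, using the plain AWS CLI through subprocess instead of the ocs-ci helpers (rm_object_recursive, MCGS3Bucket). The bucket name parameter and the 10-minute budget are assumptions carried over from the test above; the sketch also assumes the AWS CLI is installed and credentials are already configured in the environment.

import subprocess
import timeit

def timed_recursive_delete(bucket_name, timeout_minutes=10):
    # Hypothetical helper, not part of ocs-ci: empty the bucket, remove it,
    # and assert the whole operation stays within the time budget.
    start = timeit.default_timer()
    # `aws s3 rm --recursive` deletes every object under the bucket prefix
    subprocess.run(
        ["aws", "s3", "rm", f"s3://{bucket_name}", "--recursive"], check=True
    )
    # `aws s3 rb` removes the now-empty bucket itself
    subprocess.run(["aws", "s3", "rb", f"s3://{bucket_name}"], check=True)
    elapsed_minutes = (timeit.default_timer() - start) / 60
    assert elapsed_minutes <= timeout_minutes, (
        f"Failed to delete s3 bucket within {timeout_minutes} minutes"
    )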
Example 2
def measure_noobaa_exceed_bucket_quota(measurement_dir, request, mcg_obj, awscli_pod):
    """
    Create a NooBaa bucket, set its capacity quota to 2GB and fill it with data.

    Returns:
        dict: Contains information about `start` and `stop` time of
        the operation that exceeds the bucket quota
    """
    bucket_name = create_unique_resource_name(
        resource_description="bucket", resource_type="s3"
    )
    bucket = MCGS3Bucket(bucket_name, mcg=mcg_obj)
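    # Set a 2GB capacity quota on the bucket via NooBaa's bucket_api
    # update_bucket RPC call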
    mcg_obj.send_rpc_query(
        "bucket_api",
        "update_bucket",
        {"name": bucket_name, "quota": {"unit": "GIGABYTE", "size": 2}},
    )
    bucket_info = mcg_obj.get_bucket_info(bucket.name)
    logger.info(f"Bucket {bucket.name} storage: {bucket_info['storage']}")
    logger.info(f"Bucket {bucket.name} data: {bucket_info['data']}")

    def teardown():
        """
        Delete test bucket.
        """
        bucket.delete()

    request.addfinalizer(teardown)

    def exceed_bucket_quota():
        """
        Upload 5 files of 500MB each into a bucket that has its quota set to 2GB.

        Returns:
            str: Name of utilized bucket
        """
        nonlocal mcg_obj
        nonlocal bucket_name
        nonlocal awscli_pod
        # run_time of the measured operation: 14 minutes
        run_time = 60 * 14
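        # Create a single 500MB zero-filled file; uploading it five times
        # (2.5GB in total) exceeds the 2GB quota set above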
        awscli_pod.exec_cmd_on_pod("dd if=/dev/zero of=/tmp/testfile bs=1M count=500")
        for i in range(1, 6):
            awscli_pod.exec_cmd_on_pod(
                craft_s3_command(
                    f"cp /tmp/testfile s3://{bucket_name}/testfile{i}", mcg_obj
                ),
                out_yaml_format=False,
                secrets=[
                    mcg_obj.access_key_id,
                    mcg_obj.access_key,
                    mcg_obj.s3_endpoint,
                ],
            )

        logger.info(f"Waiting for {run_time} seconds")
        time.sleep(run_time)
        return bucket_name

    test_file = os.path.join(
        measurement_dir, "measure_noobaa_exceed__bucket_quota.json"
    )
    measured_op = measure_operation(exceed_bucket_quota, test_file)

    bucket_info = mcg_obj.get_bucket_info(bucket.name)
    logger.info(f"Bucket {bucket.name} storage: {bucket_info['storage']}")
    logger.info(f"Bucket {bucket.name} data: {bucket_info['data']}")

    logger.info(f"Deleting data from bucket {bucket_name}")
    for i in range(1, 6):
        awscli_pod.exec_cmd_on_pod(
            craft_s3_command(f"rm s3://{bucket_name}/testfile{i}", mcg_obj),
            out_yaml_format=False,
            secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint],
        )
    return measured_op
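
The fixture relies on measure_operation to produce the returned dict. As a rough illustration of the expected shape, here is a minimal sketch of such a helper, assuming it only needs to record wall-clock start and stop timestamps of the callable and persist them to the given JSON file; the actual ocs-ci implementation records more metadata than this, and the `result` key is an assumption for illustration only.

import json
import time

def measure_operation(operation, result_file):
    # Minimal sketch: time the callable and persist the measurements.
    start_time = time.time()
    result = operation()
    stop_time = time.time()
    measured_op = {"start": start_time, "stop": stop_time, "result": result}
    with open(result_file, "w") as outfile:
        json.dump(measured_op, outfile)
    return measured_op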