Example #1
def test_exec(config):
    test_info = AddTestInfo('test frontends configuration')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            auth = Auth(each_user, ssl=config.ssl)
            rgw_conn = auth.do_auth()
            bucket_name_to_create2 = utils.gen_bucket_name_from_userid(
                each_user['user_id'])
            log.info('creating bucket with name: %s' % bucket_name_to_create2)
            bucket = resuables.create_bucket(bucket_name_to_create2, rgw_conn,
                                             each_user)
        test_info.success_status('test passed')
        sys.exit(0)

    except TestExecError as e:
        # the specific handler must precede the generic Exception handler,
        # otherwise it is unreachable
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #2
def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    # create buckets
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    bucket_versioning = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'BucketVersioning',
        'args': [bucket.name]
    })
    # checking the versioning status
    version_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'status',
        'args': None
    })
    if version_status is None:
        log.info('bucket versioning still not enabled')
    # enabling bucket versioning
    version_enable_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'enable',
        'args': None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info('version enabled')
    else:
        raise TestExecError("version enable failed")
    return bucket
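The s3lib.resource_op calls above are thin dispatch wrappers over boto3 resource attributes and methods. For orientation, a minimal sketch of the same versioning flow in plain boto3, assuming a standard S3 ServiceResource pointed at the RGW endpoint (the function name, endpoint, and credential parameters are illustrative, not part of the test library):

import boto3

def enable_versioning_plain(endpoint, access_key, secret_key, bucket_name):
    # plain-boto3 equivalent of the resource_op-based flow above (sketch)
    s3 = boto3.resource('s3', endpoint_url=endpoint,
                        aws_access_key_id=access_key,
                        aws_secret_access_key=secret_key)
    versioning = s3.BucketVersioning(bucket_name)
    if versioning.status is None:  # None until versioning is first enabled
        versioning.enable()        # PUT ?versioning with Status=Enabled
    versioning.reload()            # refresh the lazily cached attribute
    return versioning.status       # expected: 'Enabled'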
Example #3
def test_exec(config, requester):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    log.info('requester type: %s' % requester)

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        # create buckets
        log.info('no of buckets to create: %s' % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                each_user['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = resuables.create_bucket(bucket_name=bucket_name_to_create,
                                             rgw=rgw_conn,
                                             user_info=each_user)
            bucket_request_payer = s3lib.resource_op({
                'obj': rgw_conn,
                'resource': 'BucketRequestPayment',
                'args': [bucket.name]
            })
            # change the bucket request payer to 'requester'
            payer = {'Payer': requester}
            response = s3lib.resource_op({
                'obj': bucket_request_payer,
                'resource': 'put',
                'kwargs': dict(RequestPaymentConfiguration=payer)
            })
            log.info(response)
            if response is not None:
                response = HttpResponseParser(response)
                if response.status_code == 200:
                    log.info('bucket request payer modified')
                else:
                    raise TestExecError(
                        "bucket request payer modification failed")
            else:
                raise TestExecError("bucket request payer modification failed")
            payer = bucket_request_payer.payer
            log.info('bucket request payer: %s' % payer)
            if payer != 'Requester':
                raise TestExecError('Request payer is not set or changed properly')
            log.info('s3 objects to create: %s' % config.objects_count)
            if config.objects_count is not None:
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    resuables.upload_object(s3_object_name, bucket,
                                            TEST_DATA_PATH, config, each_user)
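For context, the BucketRequestPayment steps above map onto the boto3 resource API roughly as follows; a minimal sketch, assuming s3_resource is a boto3 S3 ServiceResource against RGW (function and variable names are illustrative):

def set_requester_pays(s3_resource, bucket_name):
    # sketch: switch a bucket to Requester Pays and read the setting back
    payment = s3_resource.BucketRequestPayment(bucket_name)
    payment.put(RequestPaymentConfiguration={'Payer': 'Requester'})
    payment.reload()      # refresh the cached 'payer' attribute
    return payment.payer  # expected: 'Requester'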
Example #4
def test_exec(config):
    test_info = AddTestInfo('Test Byte range')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user, ssl=config.ssl)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user)
                # uploading data
                log.info('s3 objects to create: %s' % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                    log.info('testing for negative range')
                    response = rgw_conn2.get_object(Bucket=bucket.name, Key=s3_object_name, Range='-2--1')
                    log.info('response: %s\n' % response)
                    log.info('Content-Length: %s' % response['ContentLength'])
                    log.info('s3_object_size: %s' % (config.obj_size * 1024 * 1024))
                    if response['ContentLength'] != config.obj_size * 1024 * 1024:
                        TestExecError("Content Lenght not matched")
                    log.info('testing for one positive and one negative range')
                    response = rgw_conn2.get_object(Bucket=bucket.name, Key=s3_object_name, Range='-1-3')
                    log.info('response: %s\n' % response)
                    log.info('Content-Length: %s' % response['ContentLength'])
                    log.info('s3_object_size: %s' % (config.obj_size * 1024 * 1024))
                    if response['ContentLength'] != config.obj_size * 1024 * 1024:
                        TestExecError("Content Lenght not matched")

        test_info.success_status('test passed')

        sys.exit(0)

    except TestExecError as e:
        # the specific handler must precede the generic Exception handler,
        # otherwise it is unreachable
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
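The two ranges exercised above ('-2--1' and '-1-3') are deliberately malformed; per RFC 7233 a server ignores a Range header it cannot parse, which is why the test asserts the full ContentLength comes back. For contrast, a sketch of well-formed byte ranges, assuming client is a boto3 S3 client and the object is at least 100 bytes (bucket and key names are illustrative):

resp = client.get_object(Bucket='mybucket', Key='mykey', Range='bytes=0-99')
assert resp['ContentLength'] == 100  # first 100 bytes
tail = client.get_object(Bucket='mybucket', Key='mykey', Range='bytes=-10')
assert tail['ContentLength'] == 10   # last 10 bytes only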
Example #5
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create2 = utils.gen_bucket_name_from_userid(each_user['user_id'])
        log.info('creating bucket with name: %s' % bucket_name_to_create2)
        bucket = resuables.create_bucket(bucket_name_to_create2, rgw_conn, each_user)
Example #6
def test_exec(rgw_user_info_file, config):

    test_info = AddTestInfo('Test Basic IO on S3')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()

    try:

        test_info.started_info()

        with open(rgw_user_info_file, 'r') as f:
            rgw_user_info = yaml.safe_load(f)

        mount_point = rgw_user_info['nfs_mnt_point']

        nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file)

        mounted = nfs_ganesha.initialize(write_io_info=False)

        if mounted is False:
            raise TestExecError("mount failed")

        if nfs_ganesha.rgw_user_info['nfs_version'] == 4 \
                and nfs_ganesha.rgw_user_info['Pseudo'] is not None:
            log.info('nfs version: 4')
            log.info('adding Pseudo path to writable mount point')
            mount_point = os.path.join(mount_point,
                                       nfs_ganesha.rgw_user_info['Pseudo'])
            log.info('writable mount point with Pseudo: %s' % mount_point)

        log.info('authenticating rgw user')

        # authenticate

        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()

        # add user_info io_info yaml file

        user_info_add = basic_io_structure.user(**rgw_user_info)
        write_user_info.add_user_info(user_info_add)

        if config.io_op_config.get('create', None) is True:

            # create buckets

            for bc in range(config.bucket_count):

                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    rgw_user_info['user_id'], rand_no=bc)

                bucket = s3_reusables.create_bucket(bucket_name_to_create,
                                                    rgw_conn, rgw_user_info)

                # uploading data

                log.info('s3 objects to create: %s' % config.objects_count)

                for oc in range(config.objects_count):

                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)

                    s3_reusables.upload_object(s3_object_name, bucket,
                                               TEST_DATA_PATH, config,
                                               rgw_user_info)

            log.info('verification starts on NFS mount after %s seconds' %
                     SLEEP_TIME)

            time.sleep(SLEEP_TIME)

            read_io_info_on_nfs = ReadIOInfoOnNFS(mount_point)
            read_io_info_on_nfs.yaml_fname = 'io_info.yaml'
            read_io_info_on_nfs.initialize_verify_io()
            read_io_info_on_nfs.verify_if_basedir_created()
            read_io_info_on_nfs.verify_if_files_created()

            log.info('verification complete, data intact')

            created_buckets = read_io_info_on_nfs.base_dirs
            created_objects = read_io_info_on_nfs.files

            if config.io_op_config.get('delete', None) is True:

                log.info('delete operation starts')

                for bucket_name in created_buckets:

                    bucket = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Bucket',
                        'args': [os.path.basename(bucket_name)]
                    })  # buckets are base dirs in NFS

                    objects = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'objects',
                        'args': None
                    })

                    log.info('deleting all objects in bucket')

                    objects_deleted = s3lib.resource_op({
                        'obj': objects,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('objects_deleted: %s' % objects_deleted)

                    if objects_deleted is False:
                        raise TestExecError(
                            'Resource execution failed: Object deletion failed'
                        )

                    if objects_deleted is not None:

                        response = HttpResponseParser(objects_deleted[0])

                        if response.status_code == 200:
                            log.info('objects deleted ')

                        else:
                            raise TestExecError("objects deletion failed")

                    else:
                        raise TestExecError("objects deletion failed")

                    log.info('deleting bucket: %s' % bucket.name)

                    bucket_deleted_status = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('bucket_deleted_status: %s' %
                             bucket_deleted_status)

                    if bucket_deleted_status is not None:

                        response = HttpResponseParser(bucket_deleted_status)

                        if response.status_code == 204:
                            log.info('bucket deleted ')

                        else:
                            raise TestExecError("bucket deletion failed")

                    else:
                        raise TestExecError("bucket deletion failed")

                log.info(
                    'verification on NFS will start after %s seconds for delete operation'
                    % SLEEP_TIME)

                time.sleep(SLEEP_TIME)

                for basedir in created_buckets:

                    exists = os.path.exists(basedir)

                    log.info('exists status: %s' % exists)

                    if exists is True:
                        raise TestExecError(
                            "Basedir: %s not deleted on NFS" % basedir)

                log.info('basedirs deleted')

                for each_file in created_objects:

                    log.info('verifying deletion of: %s' % each_file['file'])

                    exists = os.path.exists(each_file['file'])

                    if exists:
                        raise TestExecError(
                            "file: %s not deleted on NFS" % each_file['file'])

                    log.info('file deleted')

                log.info('verification of file deletion complete')

            if config.io_op_config.get('move', None) is True:

                log.info('move operation starts')

                for each_file in created_objects:

                    # in S3, a move is achieved by copying the object to the
                    # new name and deleting the old object

                    log.info('move operation for :%s' % each_file['file'])

                    new_obj_name = os.path.basename(
                        each_file['file']) + ".moved"

                    log.info('new file name: %s' % new_obj_name)

                    new_object = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Object',
                        'args': [each_file['bucket'], new_obj_name],
                    })

                    # copy from the old object name
                    new_object.copy_from(CopySource='%s/%s' % (
                        each_file['bucket'],
                        os.path.basename(each_file['file'])))

                    old_object = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Object',
                        'args': [each_file['bucket'],
                                 os.path.basename(each_file['file'])],
                    })
                    old_object.delete()

                    each_file['file'] = os.path.abspath(
                        os.path.join(mount_point, each_file['bucket'],
                                     new_obj_name))

                log.info(
                    'verification on NFS for move operation will start after %s seconds'
                    % SLEEP_TIME)
                time.sleep(SLEEP_TIME)

                read_io_info_on_nfs.verify_if_files_created()

                log.info('move completed, data intact')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
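As the comment in the move block notes, S3 has no native rename: a move is a server-side copy to the new key followed by a delete of the old key. A minimal standalone sketch of that idiom, assuming s3 is a boto3 S3 ServiceResource (function name illustrative):

def move_object(s3, bucket_name, old_key, new_key):
    # server-side copy to the new key, then remove the original
    s3.Object(bucket_name, new_key).copy_from(
        CopySource={'Bucket': bucket_name, 'Key': old_key})
    s3.Object(bucket_name, old_key).delete()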
Example #7
def test_exec(config):
    test_info = AddTestInfo('test versioning with objects')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()

    try:
        test_info.started_info()
        version_count = 3
        # create user
        s3_user = s3lib.create_users(1)[0]
        # authenticate
        auth = Auth(s3_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        b1_name = 'bucky.1e'  # bucket 1
        b1_k1_name = b1_name + ".key.1"  # key1
        b1_k2_name = b1_name + ".key.2"  # key2
        b2_name = 'bucky.2e'  # bucket 2
        b2_k1_name = b2_name + ".key.1"  # key1
        b2_k2_name = b2_name + ".key.2"  # key2
        b1 = resuables.create_bucket(b1_name, rgw_conn, s3_user)
        b2 = resuables.create_bucket(b2_name, rgw_conn, s3_user)
        # enable versioning on b1
        resuables.enable_versioning(b1, rgw_conn, s3_user,
                                    write_bucket_io_info)
        # upload object to version enabled bucket b1
        obj_sizes = list(config.mapped_sizes.values())
        config.obj_size = obj_sizes[0]
        for vc in range(version_count):
            resuables.upload_object(b1_k1_name,
                                    b1,
                                    TEST_DATA_PATH,
                                    config,
                                    s3_user,
                                    append_data=True,
                                    append_msg='hello vc count: %s' % str(vc))
        # upload object to non version bucket b2
        config.obj_size = obj_sizes[1]
        resuables.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config,
                                s3_user)
        # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created
        # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present
        b1_k2 = s3lib.resource_op({
            'obj': rgw_conn,
            'resource': 'Object',
            'args': [b1.name, b1_k2_name]
        })
        b2_k2 = s3lib.resource_op({
            'obj': rgw_conn,
            'resource': 'Object',
            'args': [b2.name, b2_k2_name]
        })
        log.info(
            'copy from b2_k1 key to b1_k2 key to bucket 1 -> version enabled bucket'
        )
        copy_response = b1_k2.copy_from(CopySource={
            'Bucket': b2.name,
            'Key': b2_k1_name,
        })
        log.info('copy_response: %s' % copy_response)
        if copy_response is None:
            raise TestExecError("copy object failed")
        log.info('checking if the copied object has a version id created')
        b1_k2_version_id = b1_k2.version_id
        log.info('version id: %s' % b1_k2_version_id)
        if b1_k2_version_id is None:
            raise TestExecError(
                'Version ID not created for the copied object on to the versioned enabled bucket'
            )
        else:
            log.info(
                'Version ID created for the copied object on to the versioned bucket'
            )
        all_objects_in_b1 = b1.objects.all()
        log.info('all objects in bucket 1')
        for obj in all_objects_in_b1:
            log.info('object_name: %s' % obj.key)
            versions = b1.object_versions.filter(Prefix=obj.key)
            log.info('displaying all versions of the object')
            for version in versions:
                log.info('key_name: %s --> version_id: %s' %
                         (version.object_key, version.version_id))
        log.info('-------------------------------------------')
        log.info(
            'copy from b1_k1 key to b2_k2 to bucket 2 -> non version bucket')
        copy_response = b2_k2.copy_from(CopySource={
            'Bucket': b1.name,
            'Key': b1_k1_name,
        })
        log.info('copy_response: %s' % copy_response)
        if copy_response is None:
            raise TestExecError("copy object failed")
        log.info('checking if the copied object has a version id created')
        b2_k2_version_id = b2_k2.version_id
        log.info('version id: %s' % b2_k2_version_id)
        if b2_k2_version_id is None:
            log.info(
                'Version ID not created for the copied object on to the non versioned bucket'
            )
        else:
            raise TestExecError(
                'Version ID created for the copied object on to the non versioned bucket'
            )
        all_objects_in_b2 = b2.objects.all()
        log.info('all objects in bucket 2')
        for obj in all_objects_in_b2:
            log.info('object_name: %s' % obj.key)
            versions = b2.object_versions.filter(Prefix=obj.key)
            log.info('displaying all versions of the object')
            for version in versions:
                log.info('key_name: %s --> version_id: %s' %
                         (version.object_key, version.version_id))

        test_info.success_status('test passed')
        sys.exit(0)

    except TestExecError as e:
        # the specific handler must precede the generic Exception handler,
        # otherwise it is unreachable
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #8
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    # create pool
    pool_name = '.rgw.buckets.special'
    pg_num = '8'
    pgp_num = '8'
    pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (
        pool_name, pg_num, pgp_num)
    pool_create_exec = utils.exec_shell_cmd(pool_create)
    if pool_create_exec is False:
        raise TestExecError("Pool creation failed")
    # create realm
    realm_name = 'buz-tickets'
    log.info('creating realm: %s' % realm_name)
    realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s' % realm_name
    realm_create_exec = utils.exec_shell_cmd(realm_create)
    if realm_create_exec is False:
        raise TestExecError("cmd execution failed")
    # sample output of create realm
    """
    {
        "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
        "name": "buz-tickets",
        "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
        "epoch": 1
    }
    """
    log.info('modify zonegroup ')
    modify = 'sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master' % realm_name
    modify_exec = utils.exec_shell_cmd(modify)
    if modify_exec is False:
        raise TestExecError("cmd execution failed")
    # get the zonegroup
    zonegroup_file = 'zonegroup.json'
    get_zonegroup = 'sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s' % zonegroup_file
    get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
    if get_zonegroup_exec is False:
        raise TestExecError("cmd execution failed")
    add_to_placement_targets = {"name": "special-placement", "tags": []}
    with open(zonegroup_file, 'r') as fp:
        zonegroup_txt = fp.read()
    log.info('got zonegroup info: \n%s' % zonegroup_txt)
    zonegroup = json.loads(zonegroup_txt)
    log.info('adding placement targets')
    zonegroup['placement_targets'].append(add_to_placement_targets)
    with open(zonegroup_file, 'w') as fp:
        json.dump(zonegroup, fp)
    zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file
    zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
    if zonegroup_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone group update completed')
    log.info('getting zone file')
    # get zone
    log.info('getting zone info')
    zone_file = 'zone.json'
    get_zone = 'sudo radosgw-admin zone --rgw-zone=default get > %s' % zone_file
    get_zone_exec = utils.exec_shell_cmd(get_zone)
    if get_zone_exec is False:
        raise TestExecError("cmd execution failed")
    with open(zone_file, 'r') as fp:
        zone_info = fp.read()
    log.info('zone_info :\n%s' % zone_info)
    zone_info_cleaned = json.loads(zone_info)
    special_placement_info = {
        "key": "special-placement",
        "val": {
            "index_pool": ".rgw.buckets.index",
            "data_pool": ".rgw.buckets.special",
            "data_extra_pool": ".rgw.buckets.extra"
        }
    }
    log.info('adding special placement info')
    zone_info_cleaned['placement_pools'].append(special_placement_info)
    with open(zone_file, 'w+') as fp:
        json.dump(zone_info_cleaned, fp)
    zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file
    zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
    if zone_file_set_exec is False:
        raise TestExecError("cmd execution failed")
    log.info('zone info updated ')
    zone_group_update_set = 'sudo radosgw-admin period update --commit'
    zone_group_update_set_exec = utils.exec_shell_cmd(zone_group_update_set)
    log.info(zone_group_update_set_exec)
    restarted = rgw_service.restart()
    if restarted is False:
        raise TestExecError("service restart failed")
    if config.rgw_client == 'rgw':
        log.info('client type is rgw')
        rgw_user_info = s3_swift_lib.create_users(1)[0]
        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()
        # create bucket
        bucket_name = utils.gen_bucket_name_from_userid(
            rgw_user_info['user_id'], 0)
        bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
        # create object
        s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                rgw_user_info)
    if config.rgw_client == 'swift':
        log.info('client type is swift')
        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'
        umgmt = UserMgmt()
        umgmt.create_tenant_user(tenant_name=tenant,
                                 user_id=user_names[0],
                                 displayname=user_names[0])
        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0])
        auth = Auth(user_info)
        rgw = auth.do_auth()
        container_name = utils.gen_bucket_name_from_userid(
            user_info['user_id'], rand_no=0)
        container = s3_swift_lib.resource_op({
            'obj': rgw,
            'resource': 'put_container',
            'args': [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation failed")

        swift_object_name = utils.gen_s3_object_name(
            '%s.container.%s' % (user_names[0], 0), 0)
        log.info('object name: %s' % swift_object_name)
        object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
        log.info('object path: %s' % object_path)
        object_size = utils.get_file_size(config.objects_size_range['min'],
                                          config.objects_size_range['max'])
        data_info = manage_data.io_generator(object_path, object_size)
        # upload object
        if data_info is False:
            raise TestExecError("data creation failed")
        log.info('uploading object: %s' % object_path)
        with open(object_path, 'r') as fp:
            rgw.put_object(container_name,
                           swift_object_name,
                           contents=fp.read(),
                           content_type='text/plain')
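Once the 'special-placement' target exists and the period is committed, a client can opt into it at bucket-creation time. A hedged sketch, assuming a boto3 client against RGW and the '<zonegroup>:<placement-id>' LocationConstraint form that RGW accepts (bucket name illustrative):

client.create_bucket(
    Bucket='special-bucket',
    CreateBucketConfiguration={
        'LocationConstraint': 'default:special-placement'})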
Example #9
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()


    log.info('starting IO')
    config.max_objects_per_shard = 10
    config.no_of_shards = 10
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info('sharding configuration will be added now.')
    if config.sharding_type == 'dynamic':
        log.info('sharding type is dynamic')
        # for dynamic sharding, the number of shards should be greater than
        # (number of objects) / (max objects per shard)
        # example: 500 objects with a cap of 10 objects per shard
        # needs at least 50 shards
        time.sleep(15)
        log.info('making changes to ceph.conf')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard))
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding,
                                   'True')
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info('num_shards_expected: %s' % num_shards_expected)
        log.info('trying to restart services ')
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    config.bucket_count = 1
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1)
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    log.info(resuables.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info))
    upload_objects(user_info, bucket, config)

    if config.sharding_type == 'manual':
        log.info('sharding type is manual')
        # for manual.
        # the number of shards will be the value set in the command.
        time.sleep(15)
        log.info('in manual sharding')
        cmd_exec = utils.exec_shell_cmd('radosgw-admin bucket reshard --bucket=%s --num-shards=%s '
                                        '--yes-i-really-mean-it'
                                        % (bucket.name, config.no_of_shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    # upload_objects(user_info, bucket, config)
    log.info('s3 objects to create: %s' % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, config.objects_count + oc)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)
    time.sleep(450)
    log.info('verification starts')
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket.name)
    json_doc = json.loads(op)
    bucket_id = json_doc['data']['bucket']['bucket_id']
    op2 = utils.exec_shell_cmd("radosgw-admin metadata get bucket.instance:%s:%s" % (bucket.name, bucket_id))
    json_doc2 = json.loads(op2)
    num_shards_created = json_doc2['data']['bucket_info']['num_shards']
    log.info('no_of_shards_created: %s' % num_shards_created)
    if config.sharding_type == 'manual':
        if config.no_of_shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info('Expected number of shards created')
    if config.sharding_type == 'dynamic':
        log.info('for dynamic, number of shards created should be greater '
                 'than or equal to the number of expected shards')
        log.info('no_of_shards_expected: %s' % num_shards_expected)
        if int(num_shards_created) >= int(num_shards_expected):
            log.info('Expected number of shards created')
        else:
            raise TestExecError('Expected number of shards not created')
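The dynamic-sharding expectation above is the ceiling of object count over the per-shard cap. A worked example matching the comment in the code (500 objects, 10 objects per shard):

import math

objects_count = 500
max_objects_per_shard = 10
# dynamic resharding should leave the bucket with at least this many shards
num_shards_expected = math.ceil(objects_count / max_objects_per_shard)
print(num_shards_expected)  # 50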
Example #10
def test_exec(config):
    test_info = AddTestInfo(
        'create m buckets with n objects with bucket life cycle')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user, ssl=config.ssl)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            if config.test_ops['create_bucket'] is True:
                log.info('no of buckets to create: %s' % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    bucket = resuables.create_bucket(bucket_name, rgw_conn,
                                                     each_user)
                    if config.test_ops['enable_versioning'] is True:
                        log.info('bucket versioning test on bucket: %s' %
                                 bucket.name)
                        # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                        bucket_versioning = s3lib.resource_op({
                            'obj': rgw_conn,
                            'resource': 'BucketVersioning',
                            'args': [bucket.name]
                        })
                        version_status = s3lib.resource_op({
                            'obj': bucket_versioning,
                            'resource': 'status',
                            'args': None
                        })
                        if version_status is None:
                            log.info('bucket versioning still not enabled')
                        # enabling bucket versioning
                        version_enable_status = s3lib.resource_op({
                            'obj': bucket_versioning,
                            'resource': 'enable',
                            'args': None
                        })
                        response = HttpResponseParser(version_enable_status)
                        if response.status_code == 200:
                            log.info('version enabled')
                        else:
                            raise TestExecError("version enable failed")
                    if config.test_ops['create_object'] is True:
                        # upload data
                        for oc, size in list(config.mapped_sizes.items()):
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket.name, oc)
                            if config.test_ops['version_count'] > 0:
                                for vc in range(
                                        config.test_ops['version_count']):
                                    log.info('version count for %s is %s' %
                                             (s3_object_name, str(vc)))
                                    log.info('modifying data: %s' %
                                             s3_object_name)
                                    resuables.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                        append_data=True,
                                        append_msg='hello object for version: %s\n' % str(vc))
                            else:
                                log.info('s3 objects to create: %s' %
                                         config.objects_count)
                                resuables.upload_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                    bucket_life_cycle = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'BucketLifecycleConfiguration',
                        'args': [bucket.name]
                    })
                    life_cycle = basic_lifecycle_config(prefix="key",
                                                        days=20,
                                                        id="rul1")
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj": bucket_life_cycle,
                        "resource": "put",
                        "kwargs": dict(LifecycleConfiguration=life_cycle)
                    })
                    log.info('put bucket life cycle:\n%s' %
                             put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed")
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info('bucket life cycle added')
                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info('trying to retrieve bucket lifecycle config')
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj": rgw_conn2,
                        "resource": 'get_bucket_lifecycle_configuration',
                        "kwargs": dict(Bucket=bucket.name)
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)
                        if response.status_code == 200:
                            log.info('bucket life cycle retrieved')
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")
                    else:
                        raise TestExecError("bucket life cycle retrieved")
                    if config.test_ops['create_object'] is True:
                        for oc in range(config.objects_count):
                            s3_object_name = utils.gen_s3_object_name(
                                bucket.name, oc)
                            if config.test_ops['version_count'] > 0:
                                if config.test_ops.get(
                                        'delete_versioned_object',
                                        None) is True:
                                    log.info(
                                        'list all the versions of the object and delete the '
                                        'current version of the object')
                                    log.info(
                                        'all versions for the object: %s\n' %
                                        s3_object_name)
                                    versions = bucket.object_versions.filter(
                                        Prefix=s3_object_name)
                                    t1 = []
                                    for version in versions:
                                        log.info(
                                            'key_name: %s --> version_id: %s' %
                                            (version.object_key,
                                             version.version_id))
                                        t1.append(version.version_id)
                                    s3_object = s3lib.resource_op({
                                        'obj': rgw_conn,
                                        'resource': 'Object',
                                        'args': [bucket.name, s3_object_name]
                                    })
                                    # log.info('object version to delete: %s -> %s' % (versions[0].object_key,
                                    #                                                 versions[0].version_id))
                                    delete_response = s3_object.delete()
                                    log.info('delete response: %s' %
                                             delete_response)
                                    if delete_response['DeleteMarker'] is True:
                                        log.info(
                                            'object delete marker is set to true'
                                        )
                                    else:
                                        raise TestExecError(
                                            "object delete marker is set to false")
                                    log.info(
                                        'available versions for the object after delete marker is set'
                                    )
                                    t2 = []
                                    versions_after_delete_marker_is_set = bucket.object_versions.filter(
                                        Prefix=s3_object_name)
                                    for version in versions_after_delete_marker_is_set:
                                        log.info(
                                            'key_name: %s --> version_id: %s' %
                                            (version.object_key,
                                             version.version_id))
                                        t2.append(version.version_id)
                                    t2.pop()
                                    if t1 == t2:
                                        log.info('versions remained intact')
                                    else:
                                        raise TestExecError(
                                            'versions are not intact after delete marker is set'
                                        )
                    # modify bucket lifecycle configuration, modify expiration days here for the test case.
                    if config.test_ops.get('modify_lifecycle', False) is True:
                        log.info('modifying lifecycle configuration')
                        life_cycle_modified = basic_lifecycle_config(
                            prefix="key", days=15, id="rul1", status="Disabled")
                        put_bucket_life_cycle = s3lib.resource_op({
                            "obj": bucket_life_cycle,
                            "resource": "put",
                            "kwargs": dict(LifecycleConfiguration=life_cycle_modified)
                        })
                        log.info('put bucket life cycle:\n%s' %
                                 put_bucket_life_cycle)
                        if put_bucket_life_cycle is False:
                            raise TestExecError(
                                "Resource execution failed: put bucket lifecycle failed")
                        if put_bucket_life_cycle is not None:
                            response = HttpResponseParser(
                                put_bucket_life_cycle)

                            if response.status_code == 200:
                                log.info('bucket life cycle modified')

                            else:
                                raise TestExecError(
                                    "bucket lifecycle modification failed")
                        else:
                            raise TestExecError(
                                "bucket lifecycle modification failed")
                        log.info('trying to retrieve bucket lifecycle config')
                        get_bucket_life_cycle_config = s3lib.resource_op({
                            "obj": rgw_conn2,
                            "resource": 'get_bucket_lifecycle_configuration',
                            "kwargs": dict(Bucket=bucket.name)
                        })
                        if get_bucket_life_cycle_config is False:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")
                        if get_bucket_life_cycle_config is not None:
                            response = HttpResponseParser(
                                get_bucket_life_cycle_config)
                            modified_expiration_days = get_bucket_life_cycle_config[
                                'Rules'][0]['Expiration']['Days']
                            log.info('modified expiration days: %s' %
                                     modified_expiration_days)
                            if response.status_code == 200 and modified_expiration_days == 15:
                                log.info(
                                    'bucket life cycle retrieved after modifying'
                                )
                            else:
                                raise TestExecError(
                                    "bucket lifecycle config retrieval failed after modifying"
                                )
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after modifying"
                            )
                    # disable bucket lifecycle configuration
                    if config.test_ops.get('disable_lifecycle', False) is True:
                        log.info('disabling lifecycle configuration')
                        life_cycle_disabled_config = basic_lifecycle_config(
                            prefix="key",
                            days=20,
                            id="rul1",
                            status="Disabled")
                        put_bucket_life_cycle = s3lib.resource_op({
                            "obj": bucket_life_cycle,
                            "resource": "put",
                            "kwargs": dict(
                                LifecycleConfiguration=life_cycle_disabled_config)
                        })
                        log.info('put bucket life cycle:\n%s' %
                                 put_bucket_life_cycle)
                        if put_bucket_life_cycle is False:
                            raise TestExecError(
                                "Resource execution failed: put bucket lifecycle failed")
                        if put_bucket_life_cycle is not None:
                            response = HttpResponseParser(
                                put_bucket_life_cycle)
                            if response.status_code == 200:
                                log.info('bucket life cycle updated')
                            else:
                                raise TestExecError(
                                    "bucket lifecycle addition failed")
                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                        log.info('trying to retrieve bucket lifecycle config')
                        get_bucket_life_cycle_config = s3lib.resource_op({
                            "obj": rgw_conn2,
                            "resource": 'get_bucket_lifecycle_configuration',
                            "kwargs": dict(Bucket=bucket.name)
                        })
                        if get_bucket_life_cycle_config is False:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")
                        if get_bucket_life_cycle_config is not None:
                            response = HttpResponseParser(
                                get_bucket_life_cycle_config)
                            if (response.status_code == 200 and
                                    get_bucket_life_cycle_config['Rules'][0]['Status'] == 'Disabled'):
                                log.info('disabled_status: %s' %
                                         get_bucket_life_cycle_config['Rules'][0]['Status'])
                                log.info(
                                    'bucket life cycle retrieved after disabled'
                                )
                            else:
                                raise TestExecError(
                                    "bucket lifecycle config retrieval failed after disabled"
                                )
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after disabled"
                            )
        test_info.success_status('test passed')
        sys.exit(0)

    except TestExecError as e:
        # the specific handler must precede the generic Exception handler,
        # otherwise it is unreachable
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
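basic_lifecycle_config is not shown in this example. A plausible sketch, assuming it simply builds the boto3 LifecycleConfiguration payload that the retrieval checks above (Rules[0].Expiration.Days, Rules[0].Status) expect:

def basic_lifecycle_config(prefix, days, id, status='Enabled'):
    # hypothetical reconstruction: a single prefix-scoped expiration rule
    return {
        'Rules': [{
            'ID': id,
            'Filter': {'Prefix': prefix},
            'Status': status,
            'Expiration': {'Days': days},
        }]
    }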
Example #11
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    non_ten_buckets = {}
    ten_buckets = {}
    user_names = ['bill', 'newbill', 'joe', 'newjoe']
    tenant1 = 'tenant'
    non_ten_users = s3lib.create_users(config.user_count)
    ten_users = s3lib.create_tenant_users(config.user_count, tenant1)
    # Rename users
    if config.test_ops['rename_users'] is True:
        for user in non_ten_users:
            new_non_ten_name = 'new' + user['user_id']
            out = resuables.rename_user(user['user_id'], new_non_ten_name)
            if out is False:
                raise TestExecError("RGW User rename error")
            log.info('output :%s' % out)
            user['user_id'] = new_non_ten_name

        for ten_user in ten_users:
            new_ten_name = 'new' + ten_user['user_id']
            out1 = resuables.rename_user(ten_user['user_id'], new_ten_name, tenant1)
            if out1 is False:
                raise TestExecError("RGW User rename error")
            log.info('output :%s' % out1)
            ten_user['user_id'] = new_ten_name
    # create buckets and test rename
    for user in non_ten_users:
        auth = Auth(user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create1 = utils.gen_bucket_name_from_userid(user['user_id'])
        log.info('creating bucket with name: %s' % bucket_name_to_create1)
        bucket = resuables.create_bucket(bucket_name_to_create1, rgw_conn, user)
        non_ten_buckets[user['user_id']] = bucket_name_to_create1
        if config.test_ops['rename_buckets'] is True:
            bucket_new_name1 = 'new' + bucket_name_to_create1
            non_ten_buckets[user['user_id']] = bucket_new_name1
            out2 = resuables.rename_bucket(bucket.name, bucket_new_name1, user['user_id'])
            if out2 is False:
                raise TestExecError("RGW Bucket rename error")
            log.info('output :%s' % out2)

    for ten_user in ten_users:
        auth = Auth(ten_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create2 = utils.gen_bucket_name_from_userid(ten_user['user_id'])
        log.info('creating bucket with name: %s' % bucket_name_to_create2)
        bucket = resuables.create_bucket(bucket_name_to_create2, rgw_conn, ten_user)
        ten_buckets[ten_user['user_id']] = bucket_name_to_create2
        if config.test_ops['rename_buckets'] is True:
            bucket_new_name2 = 'new' + bucket_name_to_create2
            ten_buckets[ten_user['user_id']] = bucket_new_name2
            out3 = resuables.rename_bucket(bucket.name, bucket_new_name2, ten_user['user_id'], tenant1)
            if out3 is False:
                raise TestExecError("RGW Bucket rename error")
            log.info('output :%s' % out3)
    if config.test_ops['bucket_link_unlink'] is True:
        # Bucket unlink and link from non tenanted to tenanted users
        out4 = resuables.unlink_bucket(non_ten_users[0]['user_id'], non_ten_buckets[non_ten_users[0]['user_id']])
        if out4 is False:
            raise TestExecError("RGW Bucket unlink error")
        log.info('output :%s' % out4)
        resuables.link_chown_to_tenanted(ten_users[0]['user_id'], non_ten_buckets[non_ten_users[0]['user_id']], tenant1)

        # Bucket unlink and link from tenanted to non tenanted users
        out5 = resuables.unlink_bucket(ten_users[0]['user_id'], ten_buckets[ten_users[0]['user_id']], tenant1)
        if out5 is False:
            raise TestExecError("RGW Bucket unlink error")
        log.info('output :%s' % out5)
        resuables.link_chown_to_nontenanted(non_ten_users[0]['user_id'], ten_buckets[ten_users[0]['user_id']], tenant1)
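The rename, link, unlink, and chown helpers above presumably shell out to radosgw-admin. A hedged sketch of the underlying commands (uids and bucket names illustrative; tenanted forms add --tenant):

# hedged sketch of the admin CLI behind the reusable helpers above
utils.exec_shell_cmd('radosgw-admin user rename --uid=bill --new-uid=newbill')
utils.exec_shell_cmd('radosgw-admin bucket unlink --uid=bill --bucket=mybucket')
utils.exec_shell_cmd('radosgw-admin bucket link --uid=joe --bucket=mybucket')
utils.exec_shell_cmd('radosgw-admin bucket chown --uid=joe --bucket=mybucket')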
Example #12
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # preparing data
    user_names = ['user1', 'user2', 'user3']
    Bucket_names = ['bucket1', 'bucket2', 'bucket3']
    object_names = ['o1', 'o2']
    tenant1 = 'tenant1'
    tenant2 = 'tenant2'
    t1_u1_info = create_tenant_user(tenant_name=tenant1, user_id=user_names[0])
    t1_u1_auth = Auth(t1_u1_info, ssl=config.ssl)
    t1_u1 = t1_u1_auth.do_auth()
    t2_u1_info = create_tenant_user(tenant_name=tenant2, user_id=user_names[0])
    t2_u1_auth = Auth(t2_u1_info, ssl=config.ssl)
    t2_u1 = t2_u1_auth.do_auth()
    t1_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0], rgw=t1_u1, user_info=t1_u1_info)
    t2_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0], rgw=t2_u1, user_info=t2_u1_info)
    obj_sizes = list(config.mapped_sizes.values())
    config.obj_size = obj_sizes[0]
    resuables.upload_object(s3_object_name=object_names[0],
                            bucket=t1_u1_b1,
                            TEST_DATA_PATH=TEST_DATA_PATH,
                            config=config, user_info=t1_u1_info)
    config.obj_size = obj_sizes[1]
    resuables.upload_object(s3_object_name=object_names[0],
                            bucket=t2_u1_b1,
                            TEST_DATA_PATH=TEST_DATA_PATH,
                            config=config, user_info=t2_u1_info)
    t2_u2_info = create_tenant_user(tenant_name=tenant2, user_id=user_names[1])
    t2_u2_auth = Auth(t2_u2_info, ssl=config.ssl)
    t2_u2 = t2_u2_auth.do_auth()
    # will try to access the bucket and objects in both tenants
    # access t1_u1_b1
    log.info('trying to access tenant1->user1->bucket1')
    t1_u1_b1_from_t2_u2 = s3lib.resource_op({'obj': t2_u2,
                                             'resource': 'Bucket',
                                             'args': [bucket_names[0]]})
    log.info('trying to download tenant1->user1->bucket1->object1 from tenant2->user2')
    download_path1 = TEST_DATA_PATH + "/t1_u1_b1_%s.download" % object_names[0]
    t1_u1_b1_o1_download = s3lib.resource_op({'obj': t1_u1_b1_from_t2_u2,
                                              'resource': 'download_file',
                                              'args': [object_names[0], download_path1]})
    if t1_u1_b1_o1_download is False:
        log.info('object not downloaded, worked as expected')
    if t1_u1_b1_o1_download is None:
        raise TestExecError("object downloaded for tenant1->user1->bucket1->object1, this should not happen")

    log.info('trying to access tenant2->user1->bucket1 from user2 in tenant2')

    t2_u1_b1_from_t2_u2 = s3lib.resource_op({'obj': t2_u2,
                                             'resource': 'Bucket',
                                             'args': [bucket_names[0]]})
    log.info('trying to download tenant2->user1->bucket1->object1 from tenant2->user2')
    download_path2 = TEST_DATA_PATH + "/t2_u1_b1_%s.download" % object_names[0]
    t2_u1_b1_o1_download = s3lib.resource_op({'obj': t2_u1_b1_from_t2_u2,
                                              'resource': 'download_file',
                                              'args': [object_names[0], download_path2]})
    if t2_u1_b1_o1_download is False:
        log.info('object did not download, worked as expected')
    if t2_u1_b1_o1_download is None:
        raise TestExecError('object downloaded\n'
                            'downloaded tenant2->user1->bucket1->object1, this should not happen')
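
A note on what this example relies on: with RGW multi-tenancy, a plain bucket name resolves inside the caller's own tenant, so t2_u2 asking for bucket_names[0] lands on tenant2's copy and tenant1's object stays out of reach. Addressing a bucket across tenants explicitly uses the 'tenant:bucket' form; a minimal boto3 sketch (endpoint and credentials are placeholders, the suite obtains real ones via its Auth helper):

import boto3

s3 = boto3.resource('s3',
                    endpoint_url='http://rgw.example.com:8080',
                    aws_access_key_id='ACCESS_KEY',
                    aws_secret_access_key='SECRET_KEY')
# explicit cross-tenant addressing: '<tenant>:<bucket>'
foreign_bucket = s3.Bucket('tenant1:bucket1')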
Example #13
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    if config.test_ops.get('encryption_algorithm', None) is not None:
        log.info('encryption enabled, making ceph config changes')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_crypt_require_ssl,
                                   "false")
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        if config.use_aws4 is True:
            rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
        else:
            rgw_conn = auth.do_auth()
        # enabling sharding
        if config.test_ops['sharding']['enable'] is True:
            log.info('enabling sharding on buckets')
            max_shards = config.test_ops['sharding']['max_shards']
            log.info('making changes to ceph.conf')
            ceph_conf.set_to_ceph_conf(
                'global', ConfigOpts.rgw_override_bucket_index_max_shards,
                str(max_shards))
            log.info('trying to restart services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        if config.test_ops['compression']['enable'] is True:
            compression_type = config.test_ops['compression']['type']
            log.info('enabling compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=%s' % (zone, compression_type)
            out = utils.exec_shell_cmd(cmd)
            try:
                data = json.loads(out)
                if data['placement_pools'][0]['val']['storage_classes'][
                        'STANDARD']['compression_type'] == compression_type:
                    log.info('Compression enabled successfully')
                else:
                    raise ValueError('failed to enable compression')
            except ValueError as e:
                sys.exit(str(e))
            log.info('trying to restart rgw services ')
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        # create buckets
        if config.test_ops['create_bucket'] is True:
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                bucket = resuables.create_bucket(bucket_name_to_create,
                                                 rgw_conn, each_user)
                if config.test_ops['create_object'] is True:
                    # uploading data
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(
                            bucket_name_to_create, oc)
                        log.info('s3 object name: %s' % s3_object_name)
                        s3_object_path = os.path.join(TEST_DATA_PATH,
                                                      s3_object_name)
                        log.info('s3 object path: %s' % s3_object_path)
                        if config.test_ops.get('upload_type') == 'multipart':
                            log.info('upload type: multipart')
                            resuables.upload_mutipart_object(
                                s3_object_name, bucket, TEST_DATA_PATH, config,
                                each_user)
                        else:
                            log.info('upload type: normal')
                            resuables.upload_object(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    each_user)
                        if config.test_ops['download_object'] is True:
                            log.info('trying to download object: %s' %
                                     s3_object_name)
                            s3_object_download_name = s3_object_name + ".download"
                            s3_object_download_path = os.path.join(
                                TEST_DATA_PATH, s3_object_download_name)
                            log.info('s3_object_download_path: %s' %
                                     s3_object_download_path)
                            log.info('downloading to filename: %s' %
                                     s3_object_download_name)
                            if config.test_ops.get('encryption_algorithm',
                                                   None) is not None:
                                log.info('encryption download')
                                log.info(
                                    'encryption algorithm: %s' %
                                    config.test_ops['encryption_algorithm'])
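                                # note: encryption_key (the SSE-C customer key)
                                # is assumed to be defined earlier in the full
                                # test, at upload time; it is not set in this
                                # excerpt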
                                object_downloaded_status = bucket.download_file(
                                    s3_object_name,
                                    s3_object_download_path,
                                    ExtraArgs={
                                        'SSECustomerKey':
                                        encryption_key,
                                        'SSECustomerAlgorithm':
                                        config.test_ops['encryption_algorithm']
                                    })
                            else:
                                object_downloaded_status = s3lib.resource_op({
                                    'obj':
                                    bucket,
                                    'resource':
                                    'download_file',
                                    'args':
                                    [s3_object_name, s3_object_download_path],
                                })
                            if object_downloaded_status is False:
                                raise TestExecError(
                                    "Resource execution failed: object download failed"
                                )
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            s3_object_downloaded_md5 = utils.get_md5(
                                s3_object_download_path)
                            s3_object_uploaded_md5 = utils.get_md5(
                                s3_object_path)
                            log.info('s3_object_downloaded_md5: %s' %
                                     s3_object_downloaded_md5)
                            log.info('s3_object_uploaded_md5: %s' %
                                     s3_object_uploaded_md5)
                            if str(s3_object_uploaded_md5) == str(
                                    s3_object_downloaded_md5):
                                log.info('md5 match')
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_download_path)
                            else:
                                raise TestExecError('md5 mismatch')
                        if config.local_file_delete is True:
                            log.info(
                                'deleting local file created after the upload')
                            utils.exec_shell_cmd('rm -rf %s' % s3_object_path)
                    # verification of shards after upload
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s | grep bucket_id' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace('"', '').strip().split(":")[1].strip().replace(',', '')
                        # each index shard is a rados object suffixed with the bucket id
                        cmd2 = 'rados -p default.rgw.buckets.index ls | grep %s' % b_id
                        out = utils.exec_shell_cmd(cmd2)
                        log.info('got output from sharding verification: %s' % out)
                    # print out bucket stats and verify in logs for compressed data by
                    # comparing size_kb_utilized and size_kb_actual
                    if config.test_ops['compression']['enable'] is True:
                        cmd = 'radosgw-admin bucket stats --bucket=%s' % bucket.name
                        out = utils.exec_shell_cmd(cmd)
                    if config.test_ops['delete_bucket_object'] is True:
                        log.info('listing all objects in bucket: %s' %
                                 bucket.name)
                        objects = s3lib.resource_op({
                            'obj': bucket,
                            'resource': 'objects',
                            'args': None
                        })
                        log.info('objects: %s' % objects)
                        all_objects = s3lib.resource_op({
                            'obj': objects,
                            'resource': 'all',
                            'args': None
                        })
                        log.info('all objects: %s' % all_objects)
                        for obj in all_objects:
                            log.info('object_name: %s' % obj.key)
                        log.info('deleting all objects in bucket')
                        objects_deleted = s3lib.resource_op({
                            'obj': objects,
                            'resource': 'delete',
                            'args': None
                        })
                        log.info('objects_deleted: %s' % objects_deleted)
                        if objects_deleted is False:
                            raise TestExecError(
                                'Resource execution failed: Object deletion failed'
                            )
                        if objects_deleted is not None:
                            response = HttpResponseParser(objects_deleted[0])
                            if response.status_code == 200:
                                log.info('objects deleted ')
                            else:
                                raise TestExecError("objects deletion failed")
                        else:
                            raise TestExecError("objects deletion failed")
                        log.info('deleting bucket: %s' % bucket.name)
                        # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                        bucket_deleted_status = s3lib.resource_op({
                            'obj': bucket,
                            'resource': 'delete',
                            'args': None
                        })
                        log.info('bucket_deleted_status: %s' %
                                 bucket_deleted_status)
                        if bucket_deleted_status is not None:
                            response = HttpResponseParser(
                                bucket_deleted_status)
                            if response.status_code == 204:
                                log.info('bucket deleted ')
                            else:
                                raise TestExecError("bucket deletion failed")
                        else:
                            raise TestExecError("bucket deletion failed")
        # disable compression after test
        if config.test_ops['compression']['enable'] is True:
            log.info('disable compression')
            cmd = 'radosgw-admin zone get'
            out = utils.exec_shell_cmd(cmd)
            zone = json.loads(out)
            zone = zone.get("name")
            cmd = 'radosgw-admin zone placement modify --rgw-zone=%s ' \
                  '--placement-id=default-placement --compression=none' % zone
            out = utils.exec_shell_cmd(cmd)
            srv_restarted = rgw_service.restart()
            time.sleep(10)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
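
The compression branch above only prints bucket stats; the verification its comments describe means checking that size_kb_utilized (bytes stored on disk) stays below size_kb_actual (logical object bytes). A minimal sketch of that comparison, assuming the stats JSON layout of recent radosgw-admin releases (field names may differ across versions):

import json
import subprocess

def compression_effective(bucket_name):
    # pull bucket stats and compare on-disk usage against logical size
    out = subprocess.check_output(
        'radosgw-admin bucket stats --bucket=%s' % bucket_name, shell=True)
    usage = json.loads(out)['usage']['rgw.main']
    # compressed data utilizes fewer KB than the actual object size
    return usage['size_kb_utilized'] < usage['size_kb_actual']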
Example #14
def test_exec(config):
    test_info = AddTestInfo('test bucket policy')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        # create user
        config.user_count = 1
        tenant1 = 'MountEverest'
        tenant2 = 'Himalayas'
        tenant1_user_info = s3lib.create_tenant_users(
            tenant_name=tenant1, no_of_users_to_create=config.user_count)
        tenant1_user1_info = tenant1_user_info[0]
        tenant2_user_info = s3lib.create_tenant_users(
            tenant_name=tenant2, no_of_users_to_create=config.user_count)
        tenant2_user1_info = tenant2_user_info[0]
        tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
        tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
        rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
        rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
        rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
        rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()

        # steps
        # create bucket in tenant1 for user1
        # generate bucket policy to user1 in tenant1, policy: list access to user1 in tenant2
        # add the policy to user1 in bucket1
        # # testing
        # modify bucket policy to replace the existing policy - TC 11215
        # add policy to the existing policy - TC 11214

        bucket_name1 = utils.gen_bucket_name_from_userid(
            tenant1_user1_info['user_id'], rand_no=1)
        t1_u1_bucket1 = resuables.create_bucket(
            bucket_name1,
            rgw_tenant1_user1,
            tenant1_user1_info,
        )
        bucket_name2 = utils.gen_bucket_name_from_userid(
            tenant1_user1_info['user_id'], rand_no=2)
        t1_u1_bucket2 = resuables.create_bucket(
            bucket_name2,
            rgw_tenant1_user1,
            tenant1_user1_info,
        )
        bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info['user_id']],
            actions_list=['CreateBucket'],
            resources=[t1_u1_bucket1.name])
        bucket_policy = json.dumps(bucket_policy_generated)
        log.info('policy as json: %s\n' % bucket_policy)
        log.info('bucket_policy_generated:%s\n' % bucket_policy_generated)
        bucket_policy_obj = s3lib.resource_op({
            'obj': rgw_tenant1_user1,
            'resource': 'BucketPolicy',
            'args': [t1_u1_bucket1.name]
        })
        put_policy = s3lib.resource_op({
            'obj':
            bucket_policy_obj,
            'resource':
            'put',
            'kwargs':
            dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy)
        })
        log.info('put policy response:%s\n' % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket creation faield")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200:
                log.info('bucket policy created')
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        # get policy
        get_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        log.info('got bucket policy:%s\n' % get_policy['Policy'])
        # modifying bucket policy to take new policy
        if config.bucket_policy_op == 'modify':
            # adding new action list: ListBucket to existing action: CreateBucket
            log.info('modifying bucket policy')
            actions_list = ['ListBucket', 'CreateBucket']
            actions = list(map(s3_bucket_policy.gen_action, actions_list))
            bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
                tenants_list=[tenant1],
                userids_list=[tenant2_user1_info['user_id']],
                actions_list=actions_list,
                resources=[t1_u1_bucket1.name])
            bucket_policy2 = json.dumps(bucket_policy2_generated)
            put_policy = s3lib.resource_op({
                'obj':
                bucket_policy_obj,
                'resource':
                'put',
                'kwargs':
                dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2)
            })
            log.info('put policy response:%s\n' % put_policy)
            if put_policy is False:
                raise TestExecError(
                    "Resource execution failed: bucket creation faield")
            if put_policy is not None:
                response = HttpResponseParser(put_policy)
                if response.status_code == 200:
                    log.info('bucket policy created')
                else:
                    raise TestExecError("bucket policy creation failed")
            else:
                raise TestExecError("bucket policy creation failed")
            get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
                Bucket=t1_u1_bucket1.name)
            modified_policy = json.loads(get_modified_policy['Policy'])
            log.info('got bucket policy:%s\n' % modified_policy)
            actions_list_from_modified_policy = modified_policy['Statement'][
                0]['Action']
            cleaned_actions_list_from_modified_policy = list(
                map(str, actions_list_from_modified_policy))
            log.info('cleaned_actions_list_from_modified_policy: %s' %
                     cleaned_actions_list_from_modified_policy)
            log.info('actions list to be modified: %s' % actions)
            cmp_val = utils.cmp(actions,
                                cleaned_actions_list_from_modified_policy)
            log.info('cmp_val: %s' % cmp_val)
            if cmp_val != 0:
                raise TestExecError("modification of bucket policy failed ")
        if config.bucket_policy_op == 'replace':
            log.info('replacing new bucket policy')
            new_policy_generated = s3_bucket_policy.gen_bucket_policy(
                tenants_list=[tenant1],
                userids_list=[tenant2_user1_info['user_id']],
                actions_list=['ListBucket'],
                resources=[t1_u1_bucket2.name])
            new_policy = json.dumps(new_policy_generated)
            put_policy = s3lib.resource_op({
                'obj':
                bucket_policy_obj,
                'resource':
                'put',
                'kwargs':
                dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy)
            })
            log.info('put policy response:%s\n' % put_policy)
            if put_policy is False:
                raise TestExecError(
                    "Resource execution failed: bucket creation faield")
            if put_policy is not None:
                response = HttpResponseParser(put_policy)
                if response.status_code == 200:
                    log.info('new bucket policy created')
                else:
                    raise TestExecError("bucket policy creation failed")
            else:
                raise TestExecError("bucket policy creation failed")
        if config.bucket_policy_op == 'delete':
            log.info('in delete bucket policy')
            delete_policy = s3lib.resource_op({
                'obj': bucket_policy_obj,
                'resource': 'delete',
                'args': None
            })
            if delete_policy is False:
                raise TestExecError(
                    "Resource execution failed: bucket creation faield")
            if delete_policy is not None:
                response = HttpResponseParser(delete_policy)
                if response.status_code == 200:
                    log.info('bucket policy deleted')
                else:
                    raise TestExecError("bucket policy deletion failed")
            else:
                raise TestExecError("bucket policy deletion failed")
            # confirming once again by calling get_bucket_policy
            try:
                rgw_tenant1_user1_c.get_bucket_policy(
                    Bucket=t1_u1_bucket1.name)
                raise TestExecError("bucket policy did not get deleted")
            except boto3exception.ClientError as e:
                log.info(e.response)
                response = HttpResponseParser(e.response)
                if response.error['Code'] == 'NoSuchBucketPolicy':
                    log.info('bucket policy deleted')
                else:
                    raise TestExecError("bucket policy did not get deleted")
            # log.info('get_policy after deletion: %s' % get_policy)
        test_info.success_status('test passed')
        sys.exit(0)

    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
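
For context, gen_bucket_policy above is assumed to emit a standard S3 policy document; a hand-written equivalent of the policy this test installs could look like the sketch below (the user id and bucket name are placeholders; RGW spells tenanted principals as 'arn:aws:iam::<tenant>:user/<user>'):

# illustrative policy: grant a tenant2 user list access on a tenant1 bucket
bucket_policy_example = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": ["arn:aws:iam::Himalayas:user/tenant2_user1"]},
        "Action": ["s3:ListBucket"],
        "Resource": ["arn:aws:s3:::t1-u1-bucket1"],
    }],
}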
Example #15
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    config.rgw_lc_debug_interval = 30
    config.rgw_lc_max_worker = 10
    log.info('making changes to ceph.conf')
    ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_debug_interval,
                               str(config.rgw_lc_debug_interval))
    ceph_version = utils.exec_shell_cmd("ceph version")
    op = ceph_version.split()
    for i in op:
        if i == 'nautilus':
            ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_lc_max_worker,
                                       str(config.rgw_lc_max_worker))
    log.info('trying to restart services')
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')

    # create user
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    rgw_conn2 = auth.do_auth_using_client()
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'],
                                                    rand_no=1)
    obj_list = []
    obj_tag = 'suffix1=WMV1'
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    # collect the Prefix from each lifecycle rule (plain Filter or Filter.And)
    prefix = [
        rule['Filter'].get('Prefix') or rule['Filter']['And'].get('Prefix')
        for rule in config.lifecycle_ops
    ]
    prefix = prefix if prefix else ['dummy1']
    if config.test_ops['enable_versioning'] is True:
        resuables.enable_versioning(bucket, rgw_conn, user_info,
                                    write_bucket_io_info)
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                if config.test_ops['version_count'] > 0:
                    for vc in range(config.test_ops['version_count']):
                        log.info('version count for %s is %s' %
                                 (s3_object_name, str(vc)))
                        log.info('modifying data: %s' % s3_object_name)
                        resuables.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            user_info,
                            append_data=True,
                            append_msg='hello object for version: %s\n' %
                            str(vc))
                else:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    resuables.upload_object(s3_object_name, bucket,
                                            TEST_DATA_PATH, config, user_info)

        life_cycle_rule = {"Rules": config.lifecycle_ops}
        resuables.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                                life_cycle_rule)
        lc_ops.validate_prefix_rule(bucket, config)
        if config.test_ops['delete_marker'] is True:
            life_cycle_rule_new = {"Rules": config.delete_marker_ops}
            resuables.put_get_bucket_lifecycle_test(bucket, rgw_conn,
                                                    rgw_conn2,
                                                    life_cycle_rule_new)
    if config.test_ops['enable_versioning'] is False:
        if config.test_ops['create_object'] is True:
            for oc, size in list(config.mapped_sizes.items()):
                config.obj_size = size
                key = prefix.pop()
                prefix.insert(0, key)
                s3_object_name = key + '.' + bucket.name + '.' + str(oc)
                obj_list.append(s3_object_name)
                resuables.upload_object_with_tagging(s3_object_name, bucket,
                                                     TEST_DATA_PATH, config,
                                                     user_info, obj_tag)
        life_cycle_rule = {"Rules": config.lifecycle_ops}
        resuables.put_get_bucket_lifecycle_test(bucket, rgw_conn, rgw_conn2,
                                                life_cycle_rule)
        lc_ops.validate_and_rule(bucket, config)
    resuables.remove_user(user_info)
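
The lifecycle rules themselves arrive through config.lifecycle_ops; a representative payload matching the Filter/Prefix shapes the prefix extraction above expects might be the following (Days stays at 1 because rgw_lc_debug_interval=30 shrinks a lifecycle 'day' to 30 seconds):

# illustrative config.lifecycle_ops value, in PutBucketLifecycleConfiguration
# form; rule ids, prefixes, and the tag mirror obj_tag = 'suffix1=WMV1' above
lifecycle_ops_example = [
    {
        'ID': 'prefix-rule',
        'Filter': {'Prefix': 'key1'},
        'Status': 'Enabled',
        'Expiration': {'Days': 1},
    },
    {
        'ID': 'and-rule',
        'Filter': {'And': {'Prefix': 'key2',
                           'Tags': [{'Key': 'suffix1', 'Value': 'WMV1'}]}},
        'Status': 'Enabled',
        'Expiration': {'Days': 1},
    },
]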