def test_exec(config, requester):
    test_info = AddTestInfo('Bucket Request Payer')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    log.info('requester type: %s' % requester)

    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = resuables.create_bucket(bucket_name=bucket_name_to_create, rgw=rgw_conn, user_info=each_user)
                bucket_request_payer = s3lib.resource_op({'obj': rgw_conn,
                                                          'resource': 'BucketRequestPayment',
                                                          'args': [bucket.name]
                                                          })
                # change the bucket request payer to 'requester'
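                # (hedged note) with Payer set to 'Requester', the requester rather than the
                # bucket owner is charged for requests and data transfer on this bucket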
                payer = {'Payer': requester}
                response = s3lib.resource_op({'obj': bucket_request_payer,
                                              'resource': 'put',
                                              'kwargs': dict(RequestPaymentConfiguration=payer)})
                log.info(response)
                if response is not None:
                    response = HttpResponseParser(response)
                    if response.status_code == 200:
                        log.info('bucket request payer modified')
                    else:
                        raise TestExecError("bucket request payer modification failed")
                else:
                    raise TestExecError("bucket request payer modification failed")
                payer = bucket_request_payer.payer
                log.info('bucket request payer: %s' % payer)
                if payer != 'Requester':
                    raise TestExecError('Request payer is not set or changed properly')
                log.info('s3 objects to create: %s' % config.objects_count)
                if config.objects_count is not None:
                    for oc, size in config.mapped_sizes.items():
                        config.obj_size = size
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
        test_info.success_status('test passed')
        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def upload_objects(user_info, bucket, config):

    log.info('s3 objects to create: %s' % config.objects_count)

    for oc in range(config.objects_count):
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)

        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                user_info)
def test_exec(config):
    test_info = AddTestInfo('Test Byte range')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                bucket = resuables.create_bucket(bucket_name, rgw_conn,
                                                 each_user)
                # uploading data
                log.info('s3 objects to create: %s' % config.objects_count)
                for oc, size in config.mapped_sizes.items():
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    resuables.upload_object(s3_object_name, bucket,
                                            TEST_DATA_PATH, config, each_user)
                    log.info('testing for negative range')
                    response = rgw_conn2.get_object(Bucket=bucket.name,
                                                    Key=s3_object_name,
                                                    Range='-2--1')
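                    # (hedged note) for an invalid negative range, RGW is expected to ignore
                    # the Range header and return the whole object, so ContentLength should
                    # equal the full object size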
                    log.info('response: %s\n' % response)
                    log.info('Content-Length: %s' % response['ContentLength'])
                    log.info('s3_object_size: %s' %
                             (config.obj_size * 1024 * 1024))
                    if response['ContentLength'] != config.obj_size * 1024 * 1024:
                        raise TestExecError("Content Length did not match")
                    log.info('testing for one positive and one negative range')
                    response = rgw_conn2.get_object(Bucket=bucket.name,
                                                    Key=s3_object_name,
                                                    Range='-1-3')
                    log.info('response: %s\n' % response)
                    log.info('Content-Length: %s' % response['ContentLength'])
                    log.info('s3_object_size: %s' %
                             (config.obj_size * 1024 * 1024))
                    if response['ContentLength'] != config.obj_size * 1024 * 1024:
                        raise TestExecError("Content Length did not match")

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo('test bucket policy')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:

        test_info.started_info()

        # preparing data

        user_names = ['user1', 'user2', 'user3']
        Bucket_names = ['bucket1', 'bucket2', 'bucket3']
        object_names = ['o1', 'o2']
        tenant1 = 'tenant1'
        tenant2 = 'tenant2'

        t1_u1_info = create_tenant_user(tenant_name=tenant1,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t1_u1_auth = Auth(t1_u1_info)
        t1_u1 = t1_u1_auth.do_auth()

        t2_u1_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t2_u1_auth = Auth(t2_u1_info)
        t2_u1 = t2_u1_auth.do_auth()

        t1_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0],
                                           rgw=t1_u1,
                                           user_info=t1_u1_info)

        t2_u1_b1 = resuables.create_bucket(bucket_name=Bucket_names[0],
                                           rgw=t2_u1,
                                           user_info=t2_u1_info)

        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t1_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t1_u1_info)

        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t2_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t2_u1_info)

        t2_u2_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[1],
                                        cluster_name=config.cluster_name)
        t2_u2_auth = Auth(t2_u2_info)
        t2_u2 = t2_u2_auth.do_auth()

        # will try to access the bucket and objects in both tenants

        # access t1_u1_b1

        log.info('trying to access tenant1->user1->bucket1')

        t1_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [Bucket_names[0]]
        })

        log.info(
            'trying to download tenant1->user1->bucket1->object1 from tenant2->user2'
        )

        download_path1 = TEST_DATA_PATH + "/t1_u1_b1_%s.download" % object_names[0]

        t1_u1_b1_o1_download = s3lib.resource_op({
            'obj': t1_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path1]
        })

        if t1_u1_b1_o1_download is False:
            log.info('object not downloaded, worked as expected\n')

        if t1_u1_b1_o1_download is None:
            raise TestExecError(
                "object downloaded for tenant1->user1->bucket1->object1, this should not happen"
            )

        log.info(
            'trying to access tenant2->user1->bucket1 from user2 in tenant 2')

        t2_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [Bucket_names[0]]
        })

        log.info(
            'trying to download tenant2->user1->bucket1->object1 from tenant2->user2'
        )

        download_path2 = TEST_DATA_PATH + "/t2_u1_b1_%s.download" % object_names[0]

        t2_u1_b1_o1_download = s3lib.resource_op({
            'obj': t2_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path2]
        })

        if t2_u1_b1_o1_download is False:
            log.info('object did not download, worked as expected')

        if t2_u1_b1_o1_download is None:
            raise TestExecError(
                'object downloaded\n'
                'downloaded tenant2->user1->bucket1->object1, this should not happen'
            )

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):

    test_info = AddTestInfo('RGW Dynamic Resharding test')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    try:

        test_info.started_info()

        log.info('starting IO')

        config.max_objects_per_shard = 10
        config.no_of_shards = 10

        config.user_count = 1

        user_info = s3lib.create_users(config.user_count)
        user_info = user_info[0]

        auth = Auth(user_info)
        rgw_conn = auth.do_auth()

        config.bucket_count = 1

        log.info('no of buckets to create: %s' % config.bucket_count)

        bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'],
                                                        rand_no=1)

        bucket = create_bucket_with_versioning(rgw_conn, user_info,
                                               bucket_name)

        upload_objects(user_info, bucket, config)

        log.info('sharding configuration will be added now.')

        if config.sharding_type == 'online':

            log.info('sharding type is online')

            # for online,
            # the number of shards  should be greater than   [ (no of objects)/(max objects per shard) ]
            # example: objects = 500 ; max object per shard = 10
            # then no of shards should be at least 50 or more
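            # (hedged note, not part of the original test) if objects_count is not an exact
            # multiple of max_objects_per_shard, a ceiling division gives the safer
            # expectation, e.g.:
            #   num_shards_expected = -(-config.objects_count // config.max_objects_per_shard)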

            time.sleep(15)

            log.info('making changes to ceph.conf')

            ceph_conf.set_to_ceph_conf('global',
                                       ConfigOpts.rgw_max_objs_per_shard,
                                       config.max_objects_per_shard)

            ceph_conf.set_to_ceph_conf('global',
                                       ConfigOpts.rgw_dynamic_resharding, True)

            num_shards_expected = config.objects_count / config.max_objects_per_shard

            log.info('num_shards_expected: %s' % num_shards_expected)

            log.info('trying to restart services ')

            srv_restarted = rgw_service.restart()

            time.sleep(30)

            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')

        if config.sharding_type == 'offline':

            log.info('sharding type is offline')

            # for offline.
            # the number of shards will be the value set in the command.

            time.sleep(15)

            log.info('in offline sharding')

            num_shards_expected = config.no_of_shards

            cmd_exec = utils.exec_shell_cmd(
                'radosgw-admin bucket reshard --bucket=%s --num-shards=%s' %
                (bucket.name, config.no_of_shards))

            if cmd_exec is False:
                raise TestExecError(
                    "offline resharding command execution failed")

        # upload_objects(user_info, bucket, config)

        log.info('s3 objects to create: %s' % config.objects_count)

        for oc in range(config.objects_count):
            s3_object_name = utils.gen_s3_object_name(
                bucket.name, config.objects_count + oc)

            resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                    config, user_info)

        time.sleep(300)

        log.info('verification starts')

        op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" %
                                  bucket.name)
        json_doc = json.loads(op)
        bucket_id = json_doc['data']['bucket']['bucket_id']

        op2 = utils.exec_shell_cmd(
            "radosgw-admin metadata get bucket.instance:%s:%s" %
            (bucket.name, bucket_id))
        json_doc2 = json.loads(op2)
        num_shards_created = json_doc2['data']['bucket_info']['num_shards']

        log.info('no_of_shards_created: %s' % num_shards_created)
        log.info('no_of_shards_expected: %s' % num_shards_expected)

        if config.sharding_type == 'offline':

            if num_shards_expected != num_shards_created:
                raise TestExecError("expected number of shards not created")

            log.info('Expected number of shards created')

        if config.sharding_type == 'online':

            log.info(
                'for online, '
                'number of shards created should be greater than or equal to number of  expected shards'
            )

            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')

            else:
                raise TestExecError('Expected number of shards not created')

        read_io = ReadIOInfo()
        read_io.yaml_fname = 'io_info.yaml'
        read_io.verify_io()

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo('create m buckets with n objects')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        if config.test_ops.get('encryption_algorithm', None) is not None:
            log.info('encryption enabled, making ceph config changes')
            ceph_conf.set_to_ceph_conf('global',
                                       ConfigOpts.rgw_crypt_require_ssl, False)
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info('RGW service restarted')
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            if config.use_aws4 is True:
                rgw_conn = auth.do_auth(**{'signature_version': 's3v4'})
            else:
                rgw_conn = auth.do_auth()
            # enabling sharding
            if config.test_ops['sharding']['enable'] is True:
                log.info('enabling sharding on buckets')
                max_shards = config.test_ops['sharding']['max_shards']
                log.info('making changes to ceph.conf')
                ceph_conf.set_to_ceph_conf(
                    'global', ConfigOpts.rgw_override_bucket_index_max_shards,
                    max_shards)
                log.info('trying to restart services ')
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')
            if config.test_ops['compression']['enable'] is True:
                compression_type = config.test_ops['compression']['type']
                log.info('enabling compression')
                cmd = 'radosgw-admin zone placement modify --rgw-zone=default ' \
                      '--placement-id=default-placement --compression=%s' % compression_type
                out = utils.exec_shell_cmd(cmd)
                try:
                    data = json.loads(out)
                    if data['placement_pools'][0]['val']['compression'] == compression_type:
                        log.info('Compression enabled successfully')
                    else:
                        raise ValueError('failed to enable compression')
                except ValueError as e:
                    sys.exit(str(e))
                log.info('trying to restart rgw services ')
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')
            # create buckets
            if config.test_ops['create_bucket'] is True:
                log.info('no of buckets to create: %s' % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name_to_create = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    log.info('creating bucket with name: %s' %
                             bucket_name_to_create)
                    bucket = resuables.create_bucket(bucket_name_to_create,
                                                     rgw_conn, each_user)
                    if config.test_ops['create_object'] is True:
                        # uploading data
                        log.info('s3 objects to create: %s' %
                                 config.objects_count)
                        for oc, size in config.mapped_sizes.items():
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket_name_to_create, oc)
                            log.info('s3 object name: %s' % s3_object_name)
                            s3_object_path = os.path.join(
                                TEST_DATA_PATH, s3_object_name)
                            log.info('s3 object path: %s' % s3_object_path)
                            if config.test_ops.get(
                                    'upload_type') == 'multipart':
                                log.info('upload type: multipart')
                                resuables.upload_mutipart_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                            else:
                                log.info('upload type: normal')
                                resuables.upload_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                            if config.test_ops['download_object'] is True:
                                log.info('trying to download object: %s' %
                                         s3_object_name)
                                s3_object_download_name = s3_object_name + "." + "download"
                                s3_object_download_path = os.path.join(
                                    TEST_DATA_PATH, s3_object_download_name)
                                log.info('s3_object_download_path: %s' %
                                         s3_object_download_path)
                                log.info('downloading to filename: %s' %
                                         s3_object_download_name)
                                if config.test_ops.get('encryption_algorithm',
                                                       None) is not None:
                                    log.info('encryption download')
                                    log.info(
                                        'encryption algorithm: %s' %
                                        config.test_ops['encryption_algorithm']
                                    )
                                    object_downloaded_status = bucket.download_file(
                                        s3_object_name,
                                        s3_object_download_path,
                                        ExtraArgs={
                                            'SSECustomerKey': encryption_key,
                                            'SSECustomerAlgorithm': config.test_ops['encryption_algorithm']
                                        })
                                else:
                                    object_downloaded_status = s3lib.resource_op({
                                        'obj': bucket,
                                        'resource': 'download_file',
                                        'args': [s3_object_name, s3_object_download_path],
                                    })
                                if object_downloaded_status is False:
                                    raise TestExecError(
                                        "Resource execution failed: object download failed"
                                    )
                                if object_downloaded_status is None:
                                    log.info('object downloaded')
                                s3_object_downloaded_md5 = utils.get_md5(
                                    s3_object_download_path)
                                s3_object_uploaded_md5 = utils.get_md5(
                                    s3_object_path)
                                log.info('s3_object_downloaded_md5: %s' %
                                         s3_object_downloaded_md5)
                                log.info('s3_object_uploaded_md5: %s' %
                                         s3_object_uploaded_md5)
                                if str(s3_object_uploaded_md5) == str(
                                        s3_object_downloaded_md5):
                                    log.info('md5 match')
                                    utils.exec_shell_cmd(
                                        'rm -rf %s' % s3_object_download_path)
                                else:
                                    raise TestExecError('md5 mismatch')
                            if config.local_file_delete is True:
                                log.info(
                                    'deleting local file created after the upload'
                                )
                                utils.exec_shell_cmd('rm -rf %s' %
                                                     s3_object_path)
                        # verification of shards after upload
                        if config.test_ops['sharding']['enable'] is True:
                            cmd = 'radosgw-admin metadata get bucket:%s | grep bucket_id' % bucket.name
                            out = utils.exec_shell_cmd(cmd)
                            b_id = out.replace('"', '').strip().split(":")[1].strip().replace(',', '')
                            cmd2 = 'rados -p default.rgw.buckets.index ls | grep %s' % b_id
                            out = utils.exec_shell_cmd(cmd2)
                            log.info('got output from sharding verification')
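                            # (hedged sketch, not part of the original verification) bucket index
                            # objects are typically named '.dir.<bucket_id>.<shard_no>', so the
                            # number of matching rados objects gives a rough shard count
                            if out:
                                index_objects = out.strip().split('\n')
                                log.info('index objects found: %s' % len(index_objects))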
                        # print out bucket stats and verify in logs for compressed data by
                        # comparing size_kb_utilized and size_kb_actual
                        if config.test_ops['compression']['enable'] is True:
                            cmd = 'radosgw-admin bucket stats --bucket=%s' % bucket.name
                            out = utils.exec_shell_cmd(cmd)
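                            # (hedged sketch, assumes the usual 'radosgw-admin bucket stats' JSON
                            # layout) with compression enabled, size_kb_utilized is expected to
                            # stay below size_kb_actual
                            if out:
                                stats = json.loads(out)
                                if isinstance(stats, dict):
                                    usage = stats.get('usage', {}).get('rgw.main', {})
                                    log.info('size_kb_utilized: %s, size_kb_actual: %s' %
                                             (usage.get('size_kb_utilized'),
                                              usage.get('size_kb_actual')))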
                        if config.test_ops['delete_bucket_object'] is True:
                            log.info('listing all objects in bucket: %s' %
                                     bucket.name)
                            objects = s3lib.resource_op({
                                'obj': bucket,
                                'resource': 'objects',
                                'args': None
                            })
                            log.info('objects :%s' % objects)
                            all_objects = s3lib.resource_op({
                                'obj': objects,
                                'resource': 'all',
                                'args': None
                            })
                            log.info('all objects: %s' % all_objects)
                            for obj in all_objects:
                                log.info('object_name: %s' % obj.key)
                            log.info('deleting all objects in bucket')
                            objects_deleted = s3lib.resource_op({
                                'obj': objects,
                                'resource': 'delete',
                                'args': None
                            })
                            log.info('objects_deleted: %s' % objects_deleted)
                            if objects_deleted is False:
                                raise TestExecError(
                                    'Resource execution failed: Object deletion failed'
                                )
                            if objects_deleted is not None:
                                response = HttpResponseParser(
                                    objects_deleted[0])
                                if response.status_code == 200:
                                    log.info('objects deleted ')
                                else:
                                    raise TestExecError(
                                        "objects deletion failed")
                            else:
                                raise TestExecError("objects deletion failed")
                            log.info('deleting bucket: %s' % bucket.name)
                            # bucket_deleted_status = s3_ops.resource_op(bucket, 'delete')
                            bucket_deleted_status = s3lib.resource_op({
                                'obj': bucket,
                                'resource': 'delete',
                                'args': None
                            })
                            log.info('bucket_deleted_status: %s' %
                                     bucket_deleted_status)
                            if bucket_deleted_status is not None:
                                response = HttpResponseParser(
                                    bucket_deleted_status)
                                if response.status_code == 204:
                                    log.info('bucket deleted ')
                                else:
                                    raise TestExecError(
                                        "bucket deletion failed")
                            else:
                                raise TestExecError("bucket deletion failed")
            # disable compression after test
            if config.test_ops['compression']['enable'] is True:
                log.info('disable compression')
                cmd = 'radosgw-admin zone placement modify --rgw-zone=default ' \
                      '--placement-id=default-placement --compression='
                out = utils.exec_shell_cmd(cmd)
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def upload_objects(user_info, bucket, config):
    log.info('s3 objects to create: %s' % config.objects_count)
    for oc, size in config.mapped_sizes.items():
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)
def test_exec(config):
    test_info = AddTestInfo('test versioning with objects')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()

    try:
        test_info.started_info()
        version_count = 3
        # create user
        s3_user = s3lib.create_users(1)[0]
        # authenticate
        auth = Auth(s3_user)
        rgw_conn = auth.do_auth()
        b1_name = 'bucky.1e'  # bucket 1
        b1_k1_name = b1_name + ".key.1"  # key1
        b1_k2_name = b1_name + ".key.2"  # key2
        b2_name = 'bucky.2e'  # bucket 2
        b2_k1_name = b2_name + ".key.1"  # key1
        b2_k2_name = b2_name + ".key.2"  # key2
        b1 = resuables.create_bucket(b1_name, rgw_conn, s3_user)
        b2 = resuables.create_bucket(b2_name, rgw_conn, s3_user)
        # enable versioning on b1
        resuables.enable_versioning(b1, rgw_conn, s3_user,
                                    write_bucket_io_info)
        # upload object to version enabled bucket b1
        obj_sizes = list(config.mapped_sizes.values())
        config.obj_size = obj_sizes[0]
        for vc in range(version_count):
            resuables.upload_object(b1_k1_name,
                                    b1,
                                    TEST_DATA_PATH,
                                    config,
                                    s3_user,
                                    append_data=True,
                                    append_msg='hello vc count: %s' % str(vc))
        # upload object to non version bucket b2
        config.obj_size = obj_sizes[1]
        resuables.upload_object(b2_k1_name, b2, TEST_DATA_PATH, config,
                                s3_user)
        # copy b2_k1 to b1 and check if version id is created, expectation: version id should be created
        # copy b1_k1 to b2 and check if version id is created, expectation: version id should not be present
        b1_k2 = s3lib.resource_op({
            'obj': rgw_conn,
            'resource': 'Object',
            'args': [b1.name, b1_k2_name]
        })
        b2_k2 = s3lib.resource_op({
            'obj': rgw_conn,
            'resource': 'Object',
            'args': [b2.name, b2_k2_name]
        })
        log.info('copy b2_k1 key to b1_k2 key in bucket 1 -> version enabled bucket')
        copy_response = b1_k2.copy_from(CopySource={
            'Bucket': b2.name,
            'Key': b2_k1_name,
        })
        log.info('copy_response: %s' % copy_response)
        if copy_response is None:
            raise TestExecError("copy object failed")
        log.info('checking if the copied object has a version id created')
        b1_k2_version_id = b1_k2.version_id
        log.info('version id: %s' % b1_k2_version_id)
        if b1_k2_version_id is None:
            raise TestExecError(
                'Version ID not created for the object copied onto the version-enabled bucket'
            )
        else:
            log.info(
                'Version ID created for the object copied onto the version-enabled bucket'
            )
        all_objects_in_b1 = b1.objects.all()
        log.info('all objects in bucket 1')
        for obj in all_objects_in_b1:
            log.info('object_name: %s' % obj.key)
            versions = b1.object_versions.filter(Prefix=obj.key)
            log.info('displaying all versions of the object')
            for version in versions:
                log.info('key_name: %s --> version_id: %s' %
                         (version.object_key, version.version_id))
        log.info('-------------------------------------------')
        log.info('copy b1_k1 key to b2_k2 key in bucket 2 -> non versioned bucket')
        copy_response = b2_k2.copy_from(CopySource={
            'Bucket': b1.name,
            'Key': b1_k1_name,
        })
        log.info('copy_response: %s' % copy_response)
        if copy_response is None:
            raise TestExecError("copy object failed")
        log.info('checking if the copied object has a version id created')
        b2_k2_version_id = b2_k2.version_id
        log.info('version id: %s' % b2_k2_version_id)
        if b2_k2_version_id is None:
            log.info(
                'Version ID not created for the object copied onto the non-versioned bucket'
            )
        else:
            raise TestExecError(
                'Version ID created for the object copied onto the non-versioned bucket'
            )
        all_objects_in_b2 = b2.objects.all()
        log.info('all objects in bucket 2')
        for obj in all_objects_in_b2:
            log.info('object_name: %s' % obj.key)
            versions = b2.object_versions.filter(Prefix=obj.key)
            log.info('displaying all versions of the object')
            for version in versions:
                log.info('key_name: %s --> version_id: %s' %
                         (version.object_key, version.version_id))

        test_info.success_status('test passed')
        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):

    test_info = AddTestInfo('storage_policy for %s' % config.rgw_client)
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    rgw_service = RGWService()

    try:

        # create pool

        pool_name = '.rgw.buckets.special'
        pg_num = '8'
        pgp_num = '8'

        pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (
            pool_name, pg_num, pgp_num)

        pool_create_exec = utils.exec_shell_cmd(pool_create)

        if pool_create_exec is False:
            raise TestExecError("Pool creation failed")

        # create realm

        realm_name = 'buz-tickets'

        log.info('creating realm name')

        realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s --default' % realm_name

        realm_create_exec = utils.exec_shell_cmd(realm_create)

        if realm_create_exec is False:
            raise TestExecError("cmd execution failed")

        # sample output of create realm
        """
        {
            "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
            "name": "buz-tickets",
            "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
            "epoch": 1
        }
        """
        log.info('modify zonegroup ')

        modify = 'sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master --default' % realm_name

        modify_exec = utils.exec_shell_cmd(modify)

        if modify_exec is False:
            raise TestExecError("cmd execution failed")

        # get the zonegroup

        zonegroup_file = 'zonegroup.json'

        get_zonegroup = 'sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s' % zonegroup_file

        get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)

        if get_zonegroup_exec is False:
            raise TestExecError("cmd execution failed")

        add_to_placement_targets = {"name": "special-placement", "tags": []}
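        # (hedged note) registering this placement target in the zonegroup makes
        # 'special-placement' selectable; the pools it maps to are added to the
        # zone's placement_pools section further below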

        fp = open(zonegroup_file, 'r')
        zonegroup_txt = fp.read()
        fp.close()

        log.info('got zonegroup info: \n%s' % zonegroup_txt)

        zonegroup = json.loads(zonegroup_txt)

        log.info('adding placement targets')

        zonegroup['placement_targets'].append(add_to_placement_targets)

        with open(zonegroup_file, 'w') as fp:
            json.dump(zonegroup, fp)

        zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file

        zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)

        if zonegroup_set_exec is False:
            raise TestExecError("cmd execution failed")

        log.info('zone group update completed')

        log.info('getting zone file')

        # get zone

        log.info('getting zone info')

        zone_file = 'zone.json'
        get_zone = 'sudo radosgw-admin zone --rgw-zone=default  get > zone.json'
        get_zone_exec = utils.exec_shell_cmd(get_zone)

        if get_zone_exec is False:
            raise TestExecError("cmd execution failed")

        fp = open(zone_file, 'r')
        zone_info = fp.read()
        fp.close()

        log.info('zone_info :\n%s' % zone_info)

        zone_info_cleaned = json.loads(zone_info)

        special_placement_info = {
            "key": "special-placement",
            "val": {
                "index_pool": ".rgw.buckets.index",
                "data_pool": ".rgw.buckets.special",
                "data_extra_pool": ".rgw.buckets.extra"
            }
        }

        log.info('adding  special placement info')

        zone_info_cleaned['placement_pools'].append(special_placement_info)

        log.info('zone_info_cleaned: %s' % zone_info_cleaned)

        with open(zone_file, 'w+') as fp:
            json.dump(zone_info_cleaned, fp)

        zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file

        zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)

        if zone_file_set_exec is False:
            raise TestExecError("cmd execution failed")

        log.info('zone info updated ')

        restarted = rgw_service.restart()

        if restarted is False:
            raise TestExecError("service restart failed")

        if config.rgw_client == 'rgw':

            log.info('client type is rgw')

            rgw_user_info = s3_swift_lib.create_users(1)[0]

            auth = Auth(rgw_user_info)
            rgw_conn = auth.do_auth()

            # create bucket
            bucket_name = utils.gen_bucket_name_from_userid(
                rgw_user_info['user_id'], 0)
            bucket = resuables.create_bucket(bucket_name, rgw_conn,
                                             rgw_user_info)

            # create object
            s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
            resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                    config, rgw_user_info)

        if config.rgw_client == 'swift':

            log.info('client type is swift')

            user_names = ['tuffy', 'scooby', 'max']
            tenant = 'tenant'

            umgmt = UserMgmt()

            umgmt.create_tenant_user(tenant_name=tenant,
                                     user_id=user_names[0],
                                     displayname=user_names[0])

            user_info = umgmt.create_subuser(tenant_name=tenant,
                                             user_id=user_names[0])

            auth = Auth(user_info)

            rgw = auth.do_auth()

            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=0)

            container = s3_swift_lib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })

            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")

            swift_object_name = utils.gen_s3_object_name(
                '%s.container.%s' % (user_names[0], 0), 0)

            log.info('object name: %s' % swift_object_name)

            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)

            log.info('object path: %s' % object_path)

            object_size = utils.get_file_size(config.objects_size_range['min'],
                                              config.objects_size_range['max'])

            data_info = manage_data.io_generator(object_path, object_size)

            # upload object

            if data_info is False:
                raise TestExecError("data creation failed")

            log.info('uploading object: %s' % object_path)

            with open(object_path, 'r') as fp:
                rgw.put_object(container_name,
                               swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
def test_exec(config):
    test_info = AddTestInfo(
        'create m buckets with n objects with bucket life cycle')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            if config.test_ops['create_bucket'] is True:
                log.info('no of buckets to create: %s' % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    bucket = resuables.create_bucket(bucket_name, rgw_conn,
                                                     each_user)
                    if config.test_ops['enable_versioning'] is True:
                        log.info('bucket versioning test on bucket: %s' %
                                 bucket.name)
                        # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                        bucket_versioning = s3lib.resource_op({
                            'obj': rgw_conn,
                            'resource': 'BucketVersioning',
                            'args': [bucket.name]
                        })
                        version_status = s3lib.resource_op({
                            'obj': bucket_versioning,
                            'resource': 'status',
                            'args': None
                        })
                        if version_status is None:
                            log.info('bucket versioning still not enabled')
                        # enabling bucket versioning
                        version_enable_status = s3lib.resource_op({
                            'obj': bucket_versioning,
                            'resource': 'enable',
                            'args': None
                        })
                        response = HttpResponseParser(version_enable_status)
                        if response.status_code == 200:
                            log.info('version enabled')
                        else:
                            raise TestExecError("version enable failed")
                    if config.test_ops['create_object'] is True:
                        # upload data
                        for oc, size in config.mapped_sizes.items():
                            config.obj_size = size
                            s3_object_name = utils.gen_s3_object_name(
                                bucket.name, oc)
                            if config.test_ops['version_count'] > 0:
                                for vc in range(
                                        config.test_ops['version_count']):
                                    log.info('version count for %s is %s' %
                                             (s3_object_name, str(vc)))
                                    log.info('modifying data: %s' %
                                             s3_object_name)
                                    resuables.upload_object(
                                        s3_object_name,
                                        bucket,
                                        TEST_DATA_PATH,
                                        config,
                                        each_user,
                                        append_data=True,
                                        append_msg='hello object for version: %s\n' % str(vc))
                            else:
                                log.info('s3 objects to create: %s' %
                                         config.objects_count)
                                resuables.upload_object(
                                    s3_object_name, bucket, TEST_DATA_PATH,
                                    config, each_user)
                    bucket_life_cycle = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'BucketLifecycleConfiguration',
                        'args': [bucket.name]
                    })
                    life_cycle = basic_lifecycle_config(prefix="key",
                                                        days=20,
                                                        id="rul1")
                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj": bucket_life_cycle,
                        "resource": "put",
                        "kwargs": dict(LifecycleConfiguration=life_cycle)
                    })
                    log.info('put bucket life cycle:\n%s' %
                             put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed")
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info('bucket life cycle added')
                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info('trying to retrieve bucket lifecycle config')
                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj": rgw_conn2,
                        "resource": 'get_bucket_lifecycle_configuration',
                        "kwargs": dict(Bucket=bucket.name)
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)
                        if response.status_code == 200:
                            log.info('bucket life cycle retrieved')
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")
                    else:
                        raise TestExecError("bucket life cycle retrieved")
                    if config.test_ops['create_object'] is True:
                        for oc in range(config.objects_count):
                            s3_object_name = utils.gen_s3_object_name(
                                bucket.name, oc)
                            if config.test_ops['version_count'] > 0:
                                if config.test_ops.get(
                                        'delete_versioned_object',
                                        None) is True:
                                    log.info(
                                        'list all the versions of the object and delete the '
                                        'current version of the object')
                                    log.info(
                                        'all versions for the object: %s\n' %
                                        s3_object_name)
                                    versions = bucket.object_versions.filter(
                                        Prefix=s3_object_name)
                                    t1 = []
                                    for version in versions:
                                        log.info(
                                            'key_name: %s --> version_id: %s' %
                                            (version.object_key,
                                             version.version_id))
                                        t1.append(version.version_id)
                                    s3_object = s3lib.resource_op({
                                        'obj': rgw_conn,
                                        'resource': 'Object',
                                        'args': [bucket.name, s3_object_name]
                                    })
                                    # log.info('object version to delete: %s -> %s' % (versions[0].object_key,
                                    #                                                 versions[0].version_id))
                                    delete_response = s3_object.delete()
                                    log.info('delete response: %s' %
                                             delete_response)
                                    if delete_response['DeleteMarker'] is True:
                                        log.info(
                                            'object delete marker is set to true'
                                        )
                                    else:
                                        raise TestExecError(
                                            "'object delete marker is set to false"
                                        )
                                    log.info(
                                        'available versions for the object after delete marker is set'
                                    )
                                    t2 = []
                                    versions_after_delete_marker_is_set = bucket.object_versions.filter(
                                        Prefix=s3_object_name)
                                    for version in versions_after_delete_marker_is_set:
                                        log.info(
                                            'key_name: %s --> version_id: %s' %
                                            (version.object_key,
                                             version.version_id))
                                        t2.append(version.version_id)
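                                    # drop one entry before comparing: the test assumes the delete
                                    # marker's version id is the last one returned by the listing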
                                    t2.pop()
                                    if t1 == t2:
                                        log.info('versions remained intact')
                                    else:
                                        raise TestExecError(
                                            'versions are not intact after delete marker is set'
                                        )
                    # modify bucket lifecycle configuration, modify expiration days here for the test case.
                    if config.test_ops.get('modify_lifecycle', False) is True:
                        log.info('modifying lifecycle configuration')
                        life_cycle_modified = basic_lifecycle_config(
                            prefix="key",
                            days=15,
                            id="rul1",
                            status="Disabled")
                        put_bucket_life_cycle = s3lib.resource_op({
                            "obj": bucket_life_cycle,
                            "resource": "put",
                            "kwargs": dict(LifecycleConfiguration=life_cycle_modified)
                        })
                        log.info('put bucket life cycle:\n%s' %
                                 put_bucket_life_cycle)
                        if put_bucket_life_cycle is False:
                            raise TestExecError(
                                "Resource execution failed: put bucket lifecycle failed")
                        if put_bucket_life_cycle is not None:
                            response = HttpResponseParser(
                                put_bucket_life_cycle)

                            if response.status_code == 200:
                                log.info('bucket life cycle added')

                            else:
                                raise TestExecError(
                                    "bucket lifecycle addition failed")
                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                        log.info('trying to retrieve bucket lifecycle config')
                        get_bucket_life_cycle_config = s3lib.resource_op({
                            "obj": rgw_conn2,
                            "resource": 'get_bucket_lifecycle_configuration',
                            "kwargs": dict(Bucket=bucket.name)
                        })
                        if get_bucket_life_cycle_config is False:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")
                        if get_bucket_life_cycle_config is not None:
                            response = HttpResponseParser(
                                get_bucket_life_cycle_config)
                            modified_expiration_days = get_bucket_life_cycle_config[
                                'Rules'][0]['Expiration']['Days']
                            log.info('modified expiration days: %s' %
                                     modified_expiration_days)
                            if response.status_code == 200 and modified_expiration_days == 15:
                                log.info(
                                    'bucket life cycle retrieved after modifying'
                                )
                            else:
                                raise TestExecError(
                                    "bucket lifecycle config retrieval failed after modifying"
                                )
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after modifying"
                            )
                    # disable bucket lifecycle configuration
                    if config.test_ops.get('disable_lifecycle', False) is True:
                        log.info('disabling lifecycle configuration')
                        life_cycle_disabled_config = basic_lifecycle_config(
                            prefix="key",
                            days=20,
                            id="rul1",
                            status="Disabled")
                        put_bucket_life_cycle = s3lib.resource_op({
                            "obj": bucket_life_cycle,
                            "resource": "put",
                            "kwargs": dict(LifecycleConfiguration=life_cycle_disabled_config)
                        })
                        log.info('put bucket life cycle:\n%s' %
                                 put_bucket_life_cycle)
                        if put_bucket_life_cycle is False:
                            raise TestExecError(
                                "Resource execution failed: put bucket lifecycle failed"
                            )
                        if put_bucket_life_cycle is not None:
                            response = HttpResponseParser(
                                put_bucket_life_cycle)
                            if response.status_code == 200:
                                log.info('bucket life cycle added')
                            else:
                                raise TestExecError(
                                    "bucket lifecycle addition failed")
                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")
                        log.info('trying to retrieve bucket lifecycle config')
                        get_bucket_life_cycle_config = s3lib.resource_op({
                            "obj": rgw_conn2,
                            "resource": 'get_bucket_lifecycle_configuration',
                            "kwargs": dict(Bucket=bucket.name)
                        })
                        if get_bucket_life_cycle_config is False:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")
                        if get_bucket_life_cycle_config is not None:
                            response = HttpResponseParser(
                                get_bucket_life_cycle_config)
                            if (response.status_code == 200 and
                                    get_bucket_life_cycle_config['Rules'][0]['Status'] == 'Disabled'):
                                log.info('disabled_status: %s' %
                                         get_bucket_life_cycle_config['Rules'][0]['Status'])
                                log.info('bucket life cycle retrieved after disabled')
                            else:
                                raise TestExecError(
                                    "bucket lifecycle config retrieval failed after disabled"
                                )
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed after disabled"
                            )
        test_info.success_status('test passed')
        sys.exit(0)

    except Exception, e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)


def test_exec(config):
    test_info = AddTestInfo(
        'create m buckets with n objects with bucket life cycle')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:

        test_info.started_info()

        # create user

        all_users_info = s3lib.create_users(config.user_count,
                                            config.cluster_name)

        for each_user in all_users_info:

            # authenticate

            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
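            # rgw_conn is the boto3 resource-style connection; rgw_conn2 is the
            # client-style connection used for get_bucket_lifecycle_configuration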

            # create buckets

            if config.test_ops['create_bucket'] is True:

                log.info('no of buckets to create: %s' % config.bucket_count)

                for bc in range(config.bucket_count):

                    bucket_name = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    bucket = resuables.create_bucket(bucket_name, rgw_conn,
                                                     each_user)

                    if config.test_ops['create_object'] is True:

                        # uploading data

                        log.info('s3 objects to create: %s' %
                                 config.objects_count)

                        for oc in range(config.objects_count):
                            s3_object_name = utils.gen_s3_object_name(
                                bucket.name, oc)

                            resuables.upload_object(s3_object_name, bucket,
                                                    TEST_DATA_PATH, config,
                                                    each_user)

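                    # construct the BucketLifecycleConfiguration resource for this bucket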
                    bucket_life_cycle = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'BucketLifecycleConfiguration',
                        'args': [bucket.name]
                    })

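                    # lifecycle rule: expire objects under the "key" prefix after 20 days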
                    life_cycle = basic_lifecycle_config(prefix="key",
                                                        days=20,
                                                        id="rul1")

                    put_bucket_life_cycle = s3lib.resource_op({
                        "obj": bucket_life_cycle,
                        "resource": "put",
                        "kwargs": dict(LifecycleConfiguration=life_cycle)
                    })

                    log.info('put bucket life cycle:\n%s' %
                             put_bucket_life_cycle)

                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )

                    if put_bucket_life_cycle is not None:

                        response = HttpResponseParser(put_bucket_life_cycle)

                        if response.status_code == 200:
                            log.info('bucket life cycle added')

                        else:
                            raise TestExecError(
                                "bucket lifecycle addition failed")

                    else:
                        raise TestExecError("bucket lifecycle addition failed")

                    log.info('trying to retrieve bucket lifecycle config')

                    get_bucket_life_cycle_config = s3lib.resource_op({
                        "obj": rgw_conn2,
                        "resource": 'get_bucket_lifecycle_configuration',
                        "kwargs": dict(Bucket=bucket.name)
                    })
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError(
                            "bucket lifecycle config retrieval failed")

                    if get_bucket_life_cycle_config is not None:

                        response = HttpResponseParser(
                            get_bucket_life_cycle_config)

                        if response.status_code == 200:
                            log.info('bucket life cycle retrieved')

                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed")

                    else:
                        raise TestExecError("bucket lifecycle config retrieval failed")

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception, e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
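
Both lifecycle examples above call a basic_lifecycle_config helper that is defined elsewhere in the test module and is not part of this snippet. As a rough sketch only (the argument names are taken from the call sites; the body is an assumption, not the module's actual implementation), it has to return the LifecycleConfiguration dict that BucketLifecycleConfiguration.put and get_bucket_lifecycle_configuration operate on:

def basic_lifecycle_config(prefix, days, id, status="Enabled"):
    # sketch only: build a single expiration rule in the shape boto expects;
    # the real helper in the test module may differ
    rule = {
        'ID': id,
        'Prefix': prefix,
        'Status': status,
        'Expiration': {'Days': days},
    }
    return {'Rules': [rule]}

With a helper like this, the tests only have to vary days and status to drive the modify and disable paths. Similarly, s3lib.resource_op is called throughout with a dict of 'obj', 'resource', 'args' and 'kwargs'; judging from the call sites it resolves the named attribute on the given object, invokes it, and returns False when the call fails. A hypothetical equivalent, shown only to illustrate the calling convention:

def resource_op(op_spec):
    # op_spec: {'obj': <boto resource or client>, 'resource': <attribute name>,
    #           'args': [...], 'kwargs': {...}} -- hypothetical reconstruction
    obj = op_spec['obj']
    attr = getattr(obj, op_spec['resource'])
    args = op_spec.get('args') or []
    kwargs = op_spec.get('kwargs') or {}
    try:
        return attr(*args, **kwargs)
    except Exception:
        return False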