def test_exec(config):
    test_info = AddTestInfo('test multitenant user access')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # preparing data
        user_names = ['user1', 'user2', 'user3']
        bucket_names = ['bucket1', 'bucket2', 'bucket3']
        object_names = ['o1', 'o2']
        tenant1 = 'tenant1'
        tenant2 = 'tenant2'
        t1_u1_info = create_tenant_user(tenant_name=tenant1,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t1_u1_auth = Auth(t1_u1_info)
        t1_u1 = t1_u1_auth.do_auth()
        t2_u1_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t2_u1_auth = Auth(t2_u1_info)
        t2_u1 = t2_u1_auth.do_auth()
        t1_u1_b1 = resuables.create_bucket(bucket_name=bucket_names[0],
                                           rgw=t1_u1,
                                           user_info=t1_u1_info)
        t2_u1_b1 = resuables.create_bucket(bucket_name=bucket_names[0],
                                           rgw=t2_u1,
                                           user_info=t2_u1_info)
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t1_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t1_u1_info)
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t2_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t2_u1_info)
        t2_u2_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[1],
                                        cluster_name=config.cluster_name)
        t2_u2_auth = Auth(t2_u2_info)
        t2_u2 = t2_u2_auth.do_auth()
        # will try to access the bucket and objects in both tenants
        # access t1_u1_b1
        log.info('trying to access tenant1->user1->bucket1')
        t1_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [bucket_names[0]]
        })
        log.info(
            'trying to download tenant1->user1->bucket1->object1 from tenant2->user2'
        )
        download_path1 = TEST_DATA_PATH + "/t1_u1_b1_%s.download" % object_names[0]
        t1_u1_b1_o1_download = s3lib.resource_op({
            'obj': t1_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path1]
        })
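        # NOTE (assumed convention): s3lib.resource_op returns False when the
        # underlying boto3 call raises, and the boto3 return value otherwise.
        # boto3's download_file() returns None on success, so None below means
        # the cross-tenant download unexpectedly succeeded.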
        if t1_u1_b1_o1_download is False:
            log.info('object did not download, worked as expected')
        if t1_u1_b1_o1_download is None:
            raise TestExecError(
                "downloaded tenant1->user1->bucket1->object1 from tenant2->user2, this should not happen"
            )
        log.info(
            'trying to access tenant2->user1->bucket1 from user2 in tenant 2')
        t2_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [bucket_names[0]]
        })
        log.info(
            'trying to download tenant2->user1->bucket1->object1 from tenant2->user2'
        )
        download_path2 = TEST_DATA_PATH + "/t2_u1_b1_%s.download" % object_names[0]
        t2_u1_b1_o1_download = s3lib.resource_op({
            'obj': t2_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path2]
        })
        if t2_u1_b1_o1_download is False:
            log.info('object did not download, worked as expected')
        if t2_u1_b1_o1_download is None:
            raise TestExecError(
                'downloaded tenant2->user1->bucket1->object1 from tenant2->user2, this should not happen'
            )
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
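# A hedged aside on the multitenant check above: in RGW multitenancy, bucket
# namespaces are per tenant, and a bucket outside the caller's tenant has to
# be addressed explicitly as "<tenant>:<bucket>". The plain name used by
# t2_u2 above therefore resolves only inside tenant2, which is why the
# tenant1 download is expected to fail. A minimal sketch (illustrative only;
# rgw_conn is assumed to be a boto3 Resource like the one from Auth.do_auth()):
def fetch_cross_tenant_bucket(rgw_conn, tenant, bucket_name):
    # "tenant1:bucket1" explicitly targets tenant1's namespace
    return rgw_conn.Bucket('%s:%s' % (tenant, bucket_name))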
def test_exec(config):
    test_info = AddTestInfo("create m buckets with n objects with bucket life cycle")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count, config.cluster_name)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            if config.test_ops["create_bucket"] is True:
                log.info("no of buckets to create: %s" % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name = utils.gen_bucket_name_from_userid(
                        each_user["user_id"], rand_no=1
                    )
                    bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user)
                    if config.test_ops["create_object"] is True:
                        # uploading data
                        log.info("s3 objects to create: %s" % config.objects_count)
                        for oc in range(config.objects_count):
                            s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                            resuables.upload_object(
                                s3_object_name,
                                bucket,
                                TEST_DATA_PATH,
                                config,
                                each_user,
                            )
                    bucket_life_cycle = s3lib.resource_op(
                        {
                            "obj": rgw_conn,
                            "resource": "BucketLifecycleConfiguration",
                            "args": [bucket.name],
                        }
                    )
                    life_cycle = basic_lifecycle_config(
                        prefix="key", days=20, id="rul1"
                    )
                    put_bucket_life_cycle = s3lib.resource_op(
                        {
                            "obj": bucket_life_cycle,
                            "resource": "put",
                            "kwargs": dict(LifecycleConfiguration=life_cycle),
                        }
                    )
                    log.info("put bucket life cycle:\n%s" % put_bucket_life_cycle)
                    if put_bucket_life_cycle is False:
                        raise TestExecError(
                            "Resource execution failed: put bucket lifecycle failed"
                        )
                    if put_bucket_life_cycle is not None:
                        response = HttpResponseParser(put_bucket_life_cycle)
                        if response.status_code == 200:
                            log.info("bucket life cycle added")
                        else:
                            raise TestExecError("bucket lifecycle addition failed")
                    else:
                        raise TestExecError("bucket lifecycle addition failed")
                    log.info("trying to retrieve bucket lifecycle config")
                    get_bucket_life_cycle_config = s3lib.resource_op(
                        {
                            "obj": rgw_conn2,
                            "resource": "get_bucket_lifecycle_configuration",
                            "kwargs": dict(Bucket=bucket.name),
                        }
                    )
                    if get_bucket_life_cycle_config is False:
                        raise TestExecError("bucket lifecycle config retrieval failed")
                    if get_bucket_life_cycle_config is not None:
                        response = HttpResponseParser(get_bucket_life_cycle_config)
                        if response.status_code == 200:
                            log.info("bucket life cycle retrieved")
                        else:
                            raise TestExecError(
                                "bucket lifecycle config retrieval failed"
                            )
                    else:
                        raise TestExecError("bucket life cycle retrieved")
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
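# A hedged sketch of the document shape that basic_lifecycle_config(prefix="key",
# days=20, id="rul1") presumably returns: a standard S3 LifecycleConfiguration,
# which is the structure BucketLifecycleConfiguration.put() expects.
SAMPLE_LIFECYCLE_CONFIG = {
    "Rules": [
        {
            "ID": "rul1",
            "Prefix": "key",  # applies to object keys starting with "key"
            "Status": "Enabled",
            "Expiration": {"Days": 20},  # expire matching objects after 20 days
        }
    ]
}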
def test_exec(config):
    test_info = AddTestInfo('storage_policy for %s' % config.rgw_client)
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    rgw_service = RGWService()

    try:
        # create pool
        pool_name = '.rgw.buckets.special'
        pg_num = '8'
        pgp_num = '8'
        pool_create = 'sudo ceph osd pool create "%s" %s %s replicated' % (pool_name, pg_num, pgp_num)
        pool_create_exec = utils.exec_shell_cmd(pool_create)
        if pool_create_exec is False:
            raise TestExecError("Pool creation failed")
        # create realm
        realm_name = 'buz-tickets'
        log.info('creating realm: %s' % realm_name)
        realm_create = 'sudo radosgw-admin realm create --rgw-realm=%s --default' % realm_name
        realm_create_exec = utils.exec_shell_cmd(realm_create)
        if realm_create_exec is False:
            raise TestExecError("cmd execution failed")
        # sample output of realm create:
        """
        {
            "id": "0956b174-fe14-4f97-8b50-bb7ec5e1cf62",
            "name": "buz-tickets",
            "current_period": "1950b710-3e63-4c41-a19e-46a715000980",
            "epoch": 1
        }
        """
        log.info('modifying zonegroup')
        modify = 'sudo radosgw-admin zonegroup modify --rgw-zonegroup=default --rgw-realm=%s --master --default' % realm_name
        modify_exec = utils.exec_shell_cmd(modify)
        if modify_exec is False:
            raise TestExecError("cmd execution failed")
        # get the zonegroup
        zonegroup_file = 'zonegroup.json'
        get_zonegroup = 'sudo radosgw-admin zonegroup --rgw-zonegroup=default get > %s' % zonegroup_file
        get_zonegroup_exec = utils.exec_shell_cmd(get_zonegroup)
        if get_zonegroup_exec is False:
            raise TestExecError("cmd execution failed")
        add_to_placement_targets = {
            "name": "special-placement",
            "tags": []
        }
        with open(zonegroup_file, 'r') as fp:
            zonegroup_txt = fp.read()
        log.info('got zonegroup info: \n%s' % zonegroup_txt)
        zonegroup = json.loads(zonegroup_txt)
        log.info('adding placement targets')
        zonegroup['placement_targets'].append(add_to_placement_targets)
        with open(zonegroup_file, 'w') as fp:
            json.dump(zonegroup, fp)
        zonegroup_set = 'sudo radosgw-admin zonegroup set < %s' % zonegroup_file
        zonegroup_set_exec = utils.exec_shell_cmd(zonegroup_set)
        if zonegroup_set_exec is False:
            raise TestExecError("cmd execution failed")
        log.info('zone group update completed')
        # get zone
        log.info('getting zone info')
        zone_file = 'zone.json'
        get_zone = 'sudo radosgw-admin zone --rgw-zone=default get > %s' % zone_file
        get_zone_exec = utils.exec_shell_cmd(get_zone)
        if get_zone_exec is False:
            raise TestExecError("cmd execution failed")
        with open(zone_file, 'r') as fp:
            zone_info = fp.read()
        log.info('zone_info :\n%s' % zone_info)
        zone_info_cleaned = json.loads(zone_info)
        special_placement_info = {
            "key": "special-placement",
            "val": {
                "index_pool": ".rgw.buckets.index",
                "data_pool": ".rgw.buckets.special",
                "data_extra_pool": ".rgw.buckets.extra"
            }
        }
        log.info('adding special placement info')
        zone_info_cleaned['placement_pools'].append(special_placement_info)
        log.info(zone_info_cleaned)
        with open(zone_file, 'w+') as fp:
            json.dump(zone_info_cleaned, fp)
        zone_file_set = 'sudo radosgw-admin zone set < %s' % zone_file
        zone_file_set_exec = utils.exec_shell_cmd(zone_file_set)
        if zone_file_set_exec is False:
            raise TestExecError("cmd execution failed")

        log.info('zone info updated ')
        restarted = rgw_service.restart()
        if restarted is False:
            raise TestExecError("service restart failed")
        if config.rgw_client == 'rgw':
            log.info('client type is rgw')
            rgw_user_info = s3_swift_lib.create_users(1)[0]
            auth = Auth(rgw_user_info)
            rgw_conn = auth.do_auth()
            # create bucket
            bucket_name = utils.gen_bucket_name_from_userid(rgw_user_info['user_id'], 0)
            bucket = resuables.create_bucket(bucket_name, rgw_conn, rgw_user_info)
            # create object
            s3_object_name = utils.gen_s3_object_name(bucket_name, 0)
            resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, rgw_user_info)

        if config.rgw_client == 'swift':
            log.info('client type is swift')

            user_names = ['tuffy', 'scooby', 'max']
            tenant = 'tenant'

            umgmt = UserMgmt()
            umgmt.create_tenant_user(tenant_name=tenant, user_id=user_names[0],
                                     displayname=user_names[0])

            user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])

            auth = Auth(user_info)
            rgw = auth.do_auth()
            container_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=0)
            container = s3_swift_lib.resource_op({'obj': rgw,
                                                  'resource': 'put_container',
                                                  'args': [container_name]})
            if container is False:
                raise TestExecError("Resource execution failed: container creation faield")
            swift_object_name = utils.gen_s3_object_name('%s.container.%s' % (user_names[0], 0), 0)
            log.info('object name: %s' % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info('object path: %s' % object_path)
            object_size = utils.get_file_size(config.objects_size_range['min'],
                                              config.objects_size_range['max'])
            data_info = manage_data.io_generator(object_path, object_size)
            # upload object
            if data_info is False:
                raise TestExecError("data creation failed")
            log.info('uploading object: %s' % object_path)
            with open(object_path, 'r') as fp:
                rgw.put_object(container_name, swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
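# A hedged sketch of how a client could land a bucket on the new placement
# target once the zonegroup and zone are updated: RGW reads the placement id
# from the standard S3 LocationConstraint, in the form
# "<zonegroup>[:<placement-id>]". Illustrative only; s3_client is an assumed
# boto3 client against the same RGW endpoint.
def create_bucket_on_special_placement(s3_client, bucket_name):
    return s3_client.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={
            'LocationConstraint': 'default:special-placement'
        },
    )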
def test_exec(config):
    test_info = AddTestInfo('Bucket Request Payer')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            s3_object_names = []
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' % bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = resuables.create_bucket(bucket_name=bucket_name_to_create, rgw=rgw_conn, user_info=each_user)
                bucket_request_payer = s3lib.resource_op({'obj': rgw_conn,
                                                          'resource': 'BucketRequestPayment',
                                                          'args': [bucket.name]
                                                          })
                # change the bucket request payer to 'requester'
                payer = {'Payer': 'Requester'}
                response = s3lib.resource_op({'obj': bucket_request_payer,
                                              'resource': 'put',
                                              'kwargs': dict(RequestPaymentConfiguration=payer)})
                log.info(response)
                if response is not None:
                    response = HttpResponseParser(response)
                    if response.status_code == 200:
                        log.info('bucket request payer set to Requester')
                    else:
                        raise TestExecError("bucket request payer modification failed")
                else:
                    raise TestExecError("bucket request payer modification failed")
                payer = bucket_request_payer.payer
                log.info('bucket request payer: %s' % payer)
                if payer != 'Requester':
                    raise TestExecError('Request payer is not set or changed properly')
                log.info('s3 objects to create: %s' % config.objects_count)
                if config.objects_count is not None:
                    log.info('objects size range:\n%s' % config.objects_size_range)
                    for oc in range(config.objects_count):
                        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
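# A hedged sketch of the requester side once Payer is 'Requester': the
# requesting (non-owner) user has to opt in to the charge on each call with
# RequestPayer, otherwise S3/RGW responds with 403. Illustrative only;
# client is an assumed boto3 S3 client authenticated as the requester.
def download_as_requester(client, bucket_name, key, local_path):
    response = client.get_object(
        Bucket=bucket_name,
        Key=key,
        RequestPayer='requester',  # mandatory opt-in on requester-pays buckets
    )
    with open(local_path, 'wb') as fp:
        fp.write(response['Body'].read())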
def test_exec(config):
    test_info = AddTestInfo("RGW Dynamic Resharding test")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    try:
        test_info.started_info()
        log.info("starting IO")
        config.max_objects_per_shard = 10
        config.no_of_shards = 10
        config.user_count = 1
        user_info = s3lib.create_users(config.user_count)
        user_info = user_info[0]
        auth = Auth(user_info)
        rgw_conn = auth.do_auth()
        config.bucket_count = 1
        log.info("no of buckets to create: %s" % config.bucket_count)
        bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"],
                                                        rand_no=1)
        bucket = create_bucket_with_versioning(rgw_conn, user_info,
                                               bucket_name)
        upload_objects(user_info, bucket, config)
        log.info("sharding configuration will be added now.")
        if config.sharding_type == "online":
            log.info("sharding type is online")
            # for online resharding, the number of shards should be greater
            # than [(no of objects) / (max objects per shard)]
            # example: objects = 500; max objects per shard = 10
            # then the number of shards should be at least 50 or more
            time.sleep(15)
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_max_objs_per_shard,
                config.max_objects_per_shard,
            )
            ceph_conf.set_to_ceph_conf("global",
                                       ConfigOpts.rgw_dynamic_resharding, True)
            num_shards_expected = config.objects_count / config.max_objects_per_shard
            log.info("num_shards_expected: %s" % num_shards_expected)
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.sharding_type == "offline":
            log.info("sharding type is offline")
            # for offline.
            # the number of shards will be the value set in the command.
            time.sleep(15)
            log.info("in offline sharding")
            cmd_exec = utils.exec_shell_cmd(
                "radosgw-admin bucket reshard --bucket=%s --num-shards=%s" %
                (bucket.name, config.no_of_shards))
            if cmd_exec is False:
                raise TestExecError(
                    "offline resharding command execution failed")
        # upload_objects(user_info, bucket, config)
        log.info("s3 objects to create: %s" % config.objects_count)
        for oc in range(config.objects_count):
            s3_object_name = utils.gen_s3_object_name(
                bucket.name, config.objects_count + oc)
            resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                    config, user_info)
        time.sleep(300)
        log.info("verification starts")
        op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" %
                                  bucket.name)
        json_doc = json.loads(op)
        bucket_id = json_doc["data"]["bucket"]["bucket_id"]
        op2 = utils.exec_shell_cmd(
            "radosgw-admin metadata get bucket.instance:%s:%s" %
            (bucket.name, bucket_id))
        json_doc2 = json.loads(op2)
        num_shards_created = json_doc2["data"]["bucket_info"]["num_shards"]
        log.info("no_of_shards_created: %s" % num_shards_created)
        log.info("no_of_shards_expected: %s" % num_shards_expected)
        if config.sharding_type == "offline":
            if num_shards_expected != num_shards_created:
                raise TestExecError("expected number of shards not created")
            log.info("Expected number of shards created")
        if config.sharding_type == "online":
            log.info(
                "for online, "
                "number of shards created should be greater than or equal to number of  expected shards"
            )
            if int(num_shards_created) >= int(num_shards_expected):
                log.info("Expected number of shards created")
            else:
                raise TestExecError("Expected number of shards not created")
        read_io = ReadIOInfo()
        read_io.yaml_fname = "io_info.yaml"
        read_io.verify_io()
        test_info.success_status("test passed")
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
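# A hedged extra verification step: radosgw-admin also exposes the reshard
# queue and per-bucket reshard status, which can confirm that dynamic
# resharding actually ran. The commands exist in radosgw-admin; logging the
# raw output here is illustrative.
def log_reshard_status(bucket_name):
    pending = utils.exec_shell_cmd("radosgw-admin reshard list")
    log.info("reshard queue: %s" % pending)
    status = utils.exec_shell_cmd(
        "radosgw-admin reshard status --bucket=%s" % bucket_name)
    log.info("reshard status for %s: %s" % (bucket_name, status))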
def upload_objects(user_info, bucket, config):
    log.info("s3 objects to create: %s" % config.objects_count)
    for oc in range(config.objects_count):
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                user_info)