Example #1
    def create_admin_user(self, user_id, displayname, cluster_name='ceph'):
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            log.info('cluster name: %s' % cluster_name)
            cmd = 'radosgw-admin user create --uid=%s --display-name=%s --cluster %s' % (
                user_id, displayname, cluster_name)
            log.info('cmd to execute:\n%s' % cmd)
            # check_output raises CalledProcessError on a non-zero exit,
            # which the handler below relies on
            v = subprocess.check_output(cmd, shell=True)
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details['user_id'] = v_as_json['user_id']
            user_details['display_name'] = v_as_json['display_name']
            user_details['access_key'] = v_as_json['keys'][0]['access_key']
            user_details['secret_key'] = v_as_json['keys'][0]['secret_key']
            user_info = basic_io_structure.user(**{'user_id': user_details['user_id'],
                                                   'access_key': user_details['access_key'],
                                                   'secret_key': user_details['secret_key']})
            write_user_info.add_user_info(user_info)
            log.info('access_key: %s' % user_details['access_key'])
            log.info('secret_key: %s' % user_details['secret_key'])
            log.info('user_id: %s' % user_details['user_id'])
            return user_details

        except subprocess.CalledProcessError as e:
            log.error('%s (exit code: %s)' % (e.output, e.returncode))
            return False
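
For reference, a minimal usage sketch of the helper above, assuming it lives on the UserMgmt class and is paired with the Auth helper as in the surrounding examples:

umgmt = UserMgmt()
admin = umgmt.create_admin_user(user_id='johndoe', displayname='John Doe')
if admin is False:
    raise RuntimeError('admin user creation failed')
# the returned dict carries user_id, display_name, access_key and secret_key
conn = Auth(admin).do_auth()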
Example #2
def test_exec(config):
    test_info = AddTestInfo('test frontends configuration')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            auth = Auth(each_user, ssl=config.ssl)
            rgw_conn = auth.do_auth()
            bucket_name_to_create2 = utils.gen_bucket_name_from_userid(
                each_user['user_id'])
            log.info('creating bucket with name: %s' % bucket_name_to_create2)
            bucket = resuables.create_bucket(bucket_name_to_create2, rgw_conn,
                                             each_user)
        test_info.success_status('test passed')
        sys.exit(0)

    # catch the specific TestExecError before the generic Exception handler,
    # otherwise the second block is unreachable
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #3
    def create_subuser(self, tenant_name, user_id, cluster_name="ceph"):
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            tenant_info = TenantInfo()
            keys = utils.gen_access_key_secret_key(user_id)
            cmd = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s' \
                  % (tenant_name, user_id, user_id, tenant_name, cluster_name)
            log.info('cmd to execute:\n%s' % cmd)
            v = subprocess.check_output(cmd, shell=True)
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details['user_id'] = v_as_json['subusers'][0]['id']
            user_details['key'] = v_as_json['swift_keys'][0]['secret_key']
            user_details['tenant'], _ = user_details['user_id'].split('$')
            user_info = basic_io_structure.user(**{'user_id': user_details['user_id'],
                                                   'secret_key': user_details['key'],
                                                   'access_key': ' '})
            write_user_info.add_user_info(dict(user_info, **tenant_info.tenant(user_details['tenant'])))
            log.info('secret_key: %s' % user_details['key'])
            log.info('user_id: %s' % user_details['user_id'])
            log.info('tenant: %s' % user_details['tenant'])
            return user_details

        except subprocess.CalledProcessError as e:
            log.error('%s (exit code: %s)' % (e.output, e.returncode))
            return False
Example #4
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # preparing data
    user_names = ['tuffy', 'scooby', 'max']
    tenant1 = 'tenant'
    cmd = 'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s' % (
    user_names[0], user_names[0], tenant1)
    out = utils.exec_shell_cmd(cmd)
    if out is False:
        raise TestExecError("RGW User creation error")
    log.info('output: %s' % out)
    v1_as_json = json.loads(out)
    log.info('created user_id: %s' % v1_as_json['user_id'])
    cmd2 = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full' % (
    tenant1, user_names[0], user_names[0], tenant1)
    out2 = utils.exec_shell_cmd(cmd2)
    if out2 is False:
        raise TestExecError("sub-user creation error")
    v2_as_json = json.loads(out2)
    log.info('created subuser: %s' % v2_as_json['subusers'][0]['id'])
    cmd3 = 'radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret' % (
    user_names[0], user_names[0], tenant1, tenant1)
    out3 = utils.exec_shell_cmd(cmd3)
    if out3 is False:
        raise TestExecError("secret_key gen error")
    v3_as_json = json.loads(out3)
    log.info('created subuser: %s\nsecret_key generated: %s' % (
    v3_as_json['swift_keys'][0]['user'], v3_as_json['swift_keys'][0]['secret_key']))
    user_info = {'user_id': v3_as_json['swift_keys'][0]['user'],
                 'key': v3_as_json['swift_keys'][0]['secret_key']}
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=cc)
        container = swiftlib.resource_op({'obj': rgw,
                                          'resource': 'put_container',
                                          'args': [container_name]})
        if container is False:
            raise TestExecError("Resource execution failed: container creation faield")
        for oc, size in list(config.mapped_sizes.items()):
            swift_object_name = utils.gen_s3_object_name('%s.container.%s' % (user_names[0], cc), oc)
            log.info('object name: %s' % swift_object_name)
            object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
            log.info('object path: %s' % object_path)
            data_info = manage_data.io_generator(object_path, size)
            if data_info is False:
                TestExecError("data creation failed")
            log.info('uploading object: %s' % object_path)
            with open(object_path, 'r') as fp:
                rgw.put_object(container_name, swift_object_name,
                               contents=fp.read(),
                               content_type='text/plain')
    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #5
def test_exec(config):
    test_info = AddTestInfo('create users')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    user_detail_file = os.path.join(lib_dir, 'user_details.json')
    try:
        test_info.started_info()
        # create a non-tenanted user
        if config.user_type == 'non-tenanted':
            all_users_info = s3lib.create_users(config.user_count)
            with open(user_detail_file, 'w') as fout:
                json.dump(all_users_info, fout)
            test_info.success_status('non-tenanted users creation completed')
        else:
            log.info('create tenanted users')
            for i in range(config.user_count):
                tenant_name = 'tenant' + str(i)
                all_users_info = s3lib.create_tenant_users(
                    config.user_count, tenant_name)
                with open(user_detail_file, 'w') as fout:
                    json.dump(all_users_info, fout)
                test_info.success_status('tenanted users creation completed')
        test_info.success_status('test passed')
        sys.exit(0)
    # a second except clause after a bare Exception handler would never run,
    # so both are folded into one
    except (RGWBaseException, Exception) as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('user creation failed')
        sys.exit(1)
Example #6
def test_exec(config, requester):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    log.info('requester type: %s' % requester)

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        # create buckets
        log.info('no of buckets to create: %s' % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(
                each_user['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = resuables.create_bucket(bucket_name=bucket_name_to_create,
                                             rgw=rgw_conn,
                                             user_info=each_user)
            bucket_request_payer = s3lib.resource_op({
                'obj': rgw_conn,
                'resource': 'BucketRequestPayment',
                'args': [bucket.name]
            })
            # change the bucket request payer to 'requester'
            payer = {'Payer': requester}
            response = s3lib.resource_op({
                'obj': bucket_request_payer,
                'resource': 'put',
                'kwargs': dict(RequestPaymentConfiguration=payer)
            })
            log.info(response)
            if response is not None:
                response = HttpResponseParser(response)
                if response.status_code == 200:
                    log.info('bucket request payer set to %s' % requester)
                else:
                    raise TestExecError(
                        "bucket request payer modification failed")
            else:
                raise TestExecError("bucket request payer modification failed")
            payer = bucket_request_payer.payer
            log.info('bucket request payer: %s' % payer)
            if payer != 'Requester':
                raise TestExecError('Request payer is not set or changed properly')
            log.info('s3 objects to create: %s' % config.objects_count)
            if config.objects_count is not None:
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    resuables.upload_object(s3_object_name, bucket,
                                            TEST_DATA_PATH, config, each_user)
Example #7
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    # test_info is referenced by the pass/fail status calls at the end
    test_info = AddTestInfo('swift user quota test')

    # preparing data
    user_names = ["tuffy", "scooby", "max"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_names[0],
                                                displayname=user_names[0])
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_names[0])
    cmd = "radosgw-admin quota enable --quota-scope=user --uid={uid} --tenant={tenant}".format(
        uid=user_names[0], tenant=tenant)
    enable_user_quota = utils.exec_shell_cmd(cmd)
    cmd = "radosgw-admin quota set --quota-scope=user --uid={uid} --tenant={tenant} --max_buckets=2000".format(
        uid=user_names[0], tenant=tenant)
    max_bucket = utils.exec_shell_cmd(cmd)
    auth = Auth(user_info)
    rgw = auth.do_auth()
    for cc in range(config.container_count):
        container_name = utils.gen_bucket_name_from_userid(
            user_info["user_id"], rand_no=cc)
        container = swiftlib.resource_op({
            "obj": rgw,
            "resource": "put_container",
            "args": [container_name]
        })
        if container is False:
            raise TestExecError(
                "Resource execution failed: container creation faield")

    host, ip = utils.get_hostname_ip()
    port = utils.get_radosgw_port_no()
    hostname = str(ip) + ":" + str(port)
    cmd = "swift -A http://{hostname}/auth/1.0 -U '{uid}' -K '{key}' stat".format(
        hostname=hostname, uid=user_info["user_id"], key=user_info["key"])
    swift_cmd = utils.exec_shell_cmd(cmd)
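    # 'swift stat' prints '   Key: Value' lines; the munging below strips
    # spaces, joins lines with ':' and folds the result into a flat dict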
    swift_cmd = swift_cmd.replace(" ", "")
    swift_cmd = swift_cmd.replace("\n", ":")
    li = list(swift_cmd.split(":"))
    res_dct = {li[i]: li[i + 1] for i in range(0, len(li) - 1, 2)}

    if int(res_dct["Containers"]) == config.container_count:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant)
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.success_status("test passed")
        sys.exit(0)
    else:
        cmd = "radosgw-admin user rm --uid={uid} --tenant={tenant} --purge-data".format(
            uid=user_names[0], tenant=tenant)
        delete_user_bucket = utils.exec_shell_cmd(cmd)
        test_info.failed_status("test failed")
        sys.exit(1)
Example #8
    def create_admin_user(self, user_id, displayname, cluster_name="ceph"):
        """
        Function to create an S3-interface/admin user.

        The S3-interface/admin user is created with the given user_id, displayname and cluster_name.

        Parameters:
            user_id (str): ID of the user
            displayname (str): display name of the user
            cluster_name (str): name of the Ceph cluster; defaults to 'ceph'

        Returns:
            user details, which contain the following
                - user_id
                - display_name
                - access_key
                - secret_key
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            log.info("cluster name: %s" % cluster_name)
            op = utils.exec_shell_cmd("radosgw-admin user list")
            if user_id in op:
                cmd = f"radosgw-admin user info --uid='{user_id}' --cluster {cluster_name}"
            else:
                cmd = f"radosgw-admin user create --uid='{user_id}' --display-name='{displayname}' --cluster {cluster_name}"
            log.info("cmd to execute:\n%s" % cmd)
            v = subprocess.check_output(cmd, shell=True)
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details["user_id"] = v_as_json["user_id"]
            user_details["display_name"] = v_as_json["display_name"]
            user_details["access_key"] = v_as_json["keys"][0]["access_key"]
            user_details["secret_key"] = v_as_json["keys"][0]["secret_key"]
            user_info = basic_io_structure.user(
                **{
                    "user_id": user_details["user_id"],
                    "access_key": user_details["access_key"],
                    "secret_key": user_details["secret_key"],
                })
            write_user_info.add_user_info(user_info)
            log.info("access_key: %s" % user_details["access_key"])
            log.info("secret_key: %s" % user_details["secret_key"])
            log.info("user_id: %s" % user_details["user_id"])
            return user_details

        except subprocess.CalledProcessError as e:
            log.error("%s (exit code: %s)" % (e.output, e.returncode))
            return False
Example #9
def test_exec(config):
    test_info = AddTestInfo('Bucket Request Payer')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count,
                                            config.cluster_name)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            s3_object_names = []
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    each_user['user_id'], rand_no=bc)
                log.info('creating bucket with name: %s' %
                         bucket_name_to_create)
                # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                bucket = resuables.create_bucket(
                    bucket_name=bucket_name_to_create,
                    rgw=rgw_conn,
                    user_info=each_user)
                bucket_request_payer = s3lib.resource_op({
                    'obj': rgw_conn,
                    'resource': 'BucketRequestPayment',
                    'args': [bucket.name]
                })
                # change the bucket request payer to 'requester'
                payer = {'Payer': 'Requester'}
                response = s3lib.resource_op({
                    'obj': bucket_request_payer,
                    'resource': 'put',
                    'kwargs': dict(RequestPaymentConfiguration=payer)
                })
                log.info(response)

        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #10
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    if config.cluster_type == "brownfield":
        log.info("Check sharding is enabled or not")
        cmd = "radosgw-admin zonegroup get"
        out = utils.exec_shell_cmd(cmd)
        zonegroup = json.loads(out)
        zonegroup = zonegroup.get("enabled_features")
        log.info(zonegroup)
        if "resharding" in zonegroup:
            log.info("sharding is enabled")
        else:
            log.info("sharding is not enabled")
            if config.enable_sharding is True:
                log.info(
                    "Enabling sharding on cluster since cluster is brownfield")
                cmd = "radosgw-admin zonegroup get"
                out = utils.exec_shell_cmd(cmd)
                zonegroup = json.loads(out)
                zonegroup_name = zonegroup.get("name")
                log.info(zonegroup_name)
                cmd = (
                    "radosgw-admin zonegroup modify --rgw-zonegroup=%s --enable-feature=resharding"
                    % zonegroup_name)
                out = utils.exec_shell_cmd(cmd)
                cmd = "radosgw-admin period update --commit"
                out = utils.exec_shell_cmd(cmd)
                cmd = "radosgw-admin zonegroup get"
                out = utils.exec_shell_cmd(cmd)
                zonegroup = json.loads(out)
                zonegroup = zonegroup.get("enabled_features")
                log.info(zonegroup)
                if "resharding" in zonegroup:
                    log.info("sharding is enabled")
                else:
                    raise TestExecError("sharding has not enabled")
            else:
                raise TestExecError("sharding has not enabled")

    if config.cluster_type == "greenfield":
        log.info("Check sharding is enabled or not")
        cmd = "radosgw-admin zonegroup get"
        out = utils.exec_shell_cmd(cmd)
        zonegroup = json.loads(out)
        zonegroup = zonegroup.get("enabled_features")
        log.info(zonegroup)
        if "resharding" in zonegroup:
            log.info("sharding has enabled already")
        else:
            raise TestExecError("sharding has not enabled already")
Example #11
def test_exec(config):
    test_info = AddTestInfo('Test Byte range')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        # create user
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user, ssl=config.ssl)
            rgw_conn = auth.do_auth()
            rgw_conn2 = auth.do_auth_using_client()
            # create buckets
            log.info('no of buckets to create: %s' % config.bucket_count)
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=1)
                bucket = resuables.create_bucket(bucket_name, rgw_conn, each_user)
                # uploading data
                log.info('s3 objects to create: %s' % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, each_user)
                    log.info('testing for negative range')
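                    # a syntactically invalid range such as '-2--1' should be
                    # ignored by RGW, so the full object is expected back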
                    response = rgw_conn2.get_object(Bucket=bucket.name, Key=s3_object_name, Range='-2--1')
                    log.info('response: %s\n' % response)
                    log.info('Content-Length: %s' % response['ContentLength'])
                    log.info('s3_object_size: %s' % (config.obj_size * 1024 * 1024))
                    if response['ContentLength'] != config.obj_size * 1024 * 1024:
                        raise TestExecError("Content Length not matched")
                    log.info('testing for one positive and one negative range')
                    response = rgw_conn2.get_object(Bucket=bucket.name, Key=s3_object_name, Range='-1-3')
                    log.info('response: %s\n' % response)
                    log.info('Content-Length: %s' % response['ContentLength'])
                    log.info('s3_object_size: %s' % (config.obj_size * 1024 * 1024))
                    if response['ContentLength'] != config.obj_size * 1024 * 1024:
                        raise TestExecError("Content Length not matched")

        test_info.success_status('test passed')

        sys.exit(0)

    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #12
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create2 = utils.gen_bucket_name_from_userid(each_user['user_id'])
        log.info('creating bucket with name: %s' % bucket_name_to_create2)
        bucket = resuables.create_bucket(bucket_name_to_create2, rgw_conn, each_user)
Example #13
    def create_admin_user(self, user_id, displayname, cluster_name='ceph'):
        """
            Function to create an S3-interface/admin user.

            The S3-interface/admin user is created with the given user_id, displayname and cluster_name.

            Parameters:
                user_id (str): ID of the user
                displayname (str): display name of the user
                cluster_name (str): name of the Ceph cluster; defaults to 'ceph'

            Returns:
                user details, which contain the following
                    - user_id
                    - display_name
                    - access_key
                    - secret_key
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            log.info('cluster name: %s' % cluster_name)
            cmd = 'radosgw-admin user create --uid=%s --display-name=%s --cluster %s' % (
                user_id, displayname, cluster_name)
            log.info('cmd to execute:\n%s' % cmd)
            v = subprocess.check_output(cmd, shell=True)
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details['user_id'] = v_as_json['user_id']
            user_details['display_name'] = v_as_json['display_name']
            user_details['access_key'] = v_as_json['keys'][0]['access_key']
            user_details['secret_key'] = v_as_json['keys'][0]['secret_key']
            user_info = basic_io_structure.user(
                **{
                    'user_id': user_details['user_id'],
                    'access_key': user_details['access_key'],
                    'secret_key': user_details['secret_key']
                })
            write_user_info.add_user_info(user_info)
            log.info('access_key: %s' % user_details['access_key'])
            log.info('secret_key: %s' % user_details['secret_key'])
            log.info('user_id: %s' % user_details['user_id'])
            return user_details

        except subprocess.CalledProcessError as e:
            log.error('%s (exit code: %s)' % (e.output, e.returncode))
            return False
Example #14
def test_exec(config):
    """
    Executes test based on configuration passed
    Args:
        config(object): Test configuration
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    # preparing data
    user_name = resource_op.create_users(no_of_users_to_create=1)[0]["user_id"]
    tenant = "tenant"
    tenant_user_info = umgmt.create_tenant_user(tenant_name=tenant,
                                                user_id=user_name,
                                                displayname=user_name)
    user_info = umgmt.create_subuser(tenant_name=tenant, user_id=user_name)

    hostname = socket.gethostname()
    ip = socket.gethostbyname(hostname)
    port = utils.get_radosgw_port_no()

    ip_and_port = f"{ip}:{port}"
    s3_auth.do_auth(tenant_user_info, ip_and_port)

    bucket_name = utils.gen_bucket_name_from_userid(user_name, rand_no=0)

    # Create a bucket
    s3cmd_reusable.create_bucket(bucket_name)
    log.info(f"Bucket {bucket_name} created")

    # Upload file to bucket
    uploaded_file_info = s3cmd_reusable.upload_file(
        bucket_name, test_data_path=TEST_DATA_PATH)
    uploaded_file = uploaded_file_info["name"]
    log.info(f"Uploaded file {uploaded_file} to bucket {bucket_name}")

    # Delete file from bucket
    s3cmd_reusable.delete_file(bucket_name, uploaded_file)
    log.info(f"Deleted file {uploaded_file} from bucket {bucket_name}")

    # Delete bucket
    s3cmd_reusable.delete_bucket(bucket_name)
    log.info(f"Bucket {bucket_name} deleted")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
Example #15
def test_exec(config):
    """
    Executes test based on configuration passed
    Args:
        config(object): Test configuration
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    user_info = resource_op.create_users(
        no_of_users_to_create=config.user_count)[0]
    user_name = user_info["user_id"]

    ip_and_port = s3cmd_reusable.get_rgw_ip_and_port()
    s3_auth.do_auth(user_info, ip_and_port)

    if config.bucket_stats:
        bucket_name = utils.gen_bucket_name_from_userid(user_name, rand_no=0)
        s3cmd_reusable.create_bucket(bucket_name)
        log.info(f"Bucket {bucket_name} created")
        utils.exec_shell_cmd(f"fallocate -l 25m obj25m")
        object_name = f"s3://{bucket_name}/encyclopedia/space & universe/.bkp/journal$i"
        range_val = f"1..{config.objects_count}"
        cmd = ("for i in {" + range_val +
               "}; do /home/cephuser/venv/bin/s3cmd put obj25m " +
               object_name + ";done;")

        rc = utils.exec_shell_cmd(cmd)

        if rc:
            raise AssertionError("expected scenario is not achieved!!!")

        bucket_stats = utils.exec_shell_cmd(
            f"radosgw-admin bucket stats --bucket {bucket_name}")
        log.info(f" bucket stats are :{bucket_stats}")

        data = json.loads(bucket_stats)

        num_objects = data["usage"]["rgw.main"]["num_objects"]
        log.info(f"num objects :{num_objects}")

        object_count = utils.exec_shell_cmd(
            f"/home/cephuser/venv/bin/s3cmd ls s3://{bucket_name} --recursive | wc -l"
        )
        log.info(f"object_count :{object_count}")

        if int(num_objects) != int(object_count):
            raise AssertionError("Inconsistency found in number of objects")

        if "rgw.none" in data["usage"].keys():
            raise AssertionError("inconsistency issue observed")
Example #16
def test_exec(config):
    test_info = AddTestInfo("Test Byte range")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    test_info.started_info()
    # create user
    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        # create buckets
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name = utils.gen_bucket_name_from_userid(
                each_user["user_id"], rand_no=1
            )
            bucket = reusable.create_bucket(bucket_name, rgw_conn, each_user)
            # uploading data
            log.info("s3 objects to create: %s" % config.objects_count)
            for oc, size in config.mapped_sizes.items():
                config.obj_size = size
                s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                reusable.upload_object(
                    s3_object_name, bucket, TEST_DATA_PATH, config, each_user
                )
                log.info("testing for negative range")
                response = rgw_conn2.get_object(
                    Bucket=bucket.name, Key=s3_object_name, Range="-2--1"
                )
                log.info("response: %s\n" % response)
                log.info("Content-Lenght: %s" % response["ContentLength"])
                log.info("s3_object_size: %s" % (config.obj_size * 1024 * 1024))
                if response["ContentLength"] != config.obj_size * 1024 * 1024:
                    TestExecError("Content Lenght not matched")
                log.info("testing for one positive and one negative range")
                response = rgw_conn2.get_object(
                    Bucket=bucket.name, Key=s3_object_name, Range="-1-3"
                )
                log.info("response: %s\n" % response)
                log.info("Content-Length: %s" % response["ContentLength"])
                log.info("s3_object_size: %s" % (config.obj_size * 1024 * 1024))
                if response["ContentLength"] != config.obj_size * 1024 * 1024:
                    TestExecError("Content Lenght not matched")
Example #17
    def create_subuser(self, tenant_name, user_id, cluster_name="ceph"):
        """
        Function to create a subuser under a tenant.

        Creates a Swift-interface user under the given tenant.
        Parameters:
             tenant_name (str): name of the tenant
             user_id (str): ID of the user
             cluster_name (str): name of the Ceph cluster; defaults to 'ceph'
        """
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            tenant_info = TenantInfo()
            keys = utils.gen_access_key_secret_key(user_id)
            cmd = (
                "radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s"
                % (tenant_name, user_id, user_id, tenant_name, cluster_name))
            log.info("cmd to execute:\n%s" % cmd)
            v = subprocess.check_output(cmd, shell=True)
            v_as_json = json.loads(v)
            log.info(v_as_json)
            user_details = {}
            user_details["user_id"] = v_as_json["subusers"][0]["id"]
            user_details["key"] = v_as_json["swift_keys"][0]["secret_key"]
            user_details["tenant"], _ = user_details["user_id"].split("$")
            user_info = basic_io_structure.user(
                **{
                    "user_id": user_details["user_id"],
                    "secret_key": user_details["key"],
                    "access_key": " ",
                })
            write_user_info.add_user_info(
                dict(user_info, **tenant_info.tenant(user_details["tenant"])))
            log.info("secret_key: %s" % user_details["key"])
            log.info("user_id: %s" % user_details["user_id"])
            log.info("tenant: %s" % user_details["tenant"])
            return user_details

        except subprocess.CalledProcessError as e:
            log.error("%s (exit code: %s)" % (e.output, e.returncode))
            return False
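
For reference, a minimal sketch of driving create_subuser end to end, assuming the UserMgmt, create_tenant_user and Auth helpers used elsewhere in these examples:

umgmt = UserMgmt()
umgmt.create_tenant_user(tenant_name='tenant1', user_id='tuffy', displayname='tuffy')
subuser = umgmt.create_subuser(tenant_name='tenant1', user_id='tuffy')
if subuser is False:
    raise RuntimeError('subuser creation failed')
# subuser['user_id'] is 'tenant1$tuffy:swift'; subuser['key'] is the swift secret
rgw = Auth(subuser).do_auth()
rgw.put_container('demo-container')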
Example #18
def test_exec(config):
    test_info = AddTestInfo('test with acls')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        s3_ops = ResourceOps()
        # create user
        config.user_count = 2
        all_users_info = s3lib.create_users(config.user_count,
                                            config.cluster_name)
        u1 = all_users_info[0]
        u2 = all_users_info[1]
        # authenticate
        u1_auth = Auth(u1)
        u1_rgw_conn = u1_auth.do_auth()
        u2_auth = Auth(u2)
        u2_rgw_conn = u2_auth.do_auth()
        no_of_buckets_to_create = 3
        u1_buckets = []
        u2_buckets = []
        for i in range(no_of_buckets_to_create):
            u1_bucket = create_bucket(u1_rgw_conn, u1, rand_no=i)
            log.info('u1_bucket_name: %s' % u1_bucket.name)
            u1_buckets.append(u1_bucket)
            u2_bucket = create_bucket(u2_rgw_conn, u2, rand_no=i)
            log.info('u2_bucket_name: %s' % u2_bucket.name)
            u2_buckets.append(u2_bucket)
        # test_acls_private(u1_rgw_conn, u1, u2, u1_buckets[0], u2_buckets[0])
        test_acls_public_write(u1_rgw_conn, u1, u2, u1_buckets[1],
                               u2_buckets[1])
        # test_acls_public_read(u1_rgw_conn, u1, u2, u1_buckets[2], u2_buckets[2])
        # print u1_bucket_info.delete()
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #19
def test_exec(config):
    test_info = AddTestInfo('create users')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        all_users_info = s3lib.create_users(config.user_count, config.cluster_name)
        with open('user_details', 'w') as fout:
            json.dump(all_users_info, fout)
        test_info.success_status('user creation completed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('user creation failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('user creation failed')
        sys.exit(1)
Example #20
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    test_info = AddTestInfo('create m buckets')
    conf_path = '/etc/ceph/%s.conf' % config.cluster_name
    ceph_conf = CephConfOp(conf_path)
    rgw_service = RGWService()
    try:
        test_info.started_info()
        # get user
        with open('user_details') as fin:
            all_users_info = simplejson.load(fin)
        for each_user in all_users_info:
            user_info = basic_io_structure.user(
                **{
                    'user_id': each_user['user_id'],
                    'access_key': each_user['access_key'],
                    'secret_key': each_user['secret_key']
                })
            write_user_info.add_user_info(user_info)
        for each_user in all_users_info:
            # authenticate
            auth = Auth(each_user)
            rgw_conn = auth.do_auth()
            # enabling sharding
            if config.test_ops['sharding']['enable'] is True:
                log.info('enabling sharding on buckets')
                max_shards = config.test_ops['sharding']['max_shards']
                log.info('making changes to ceph.conf')
                ceph_conf.set_to_ceph_conf(
                    'global', ConfigOpts.rgw_override_bucket_index_max_shards,
                    max_shards)
                log.info('trying to restart services ')
                srv_restarted = rgw_service.restart()
                time.sleep(10)
                if srv_restarted is False:
                    raise TestExecError("RGW service restart failed")
                else:
                    log.info('RGW service restarted')
            # create buckets
            if config.test_ops['create_bucket'] is True:
                log.info('no of buckets to create: %s' % config.bucket_count)
                for bc in range(config.bucket_count):
                    bucket_name_to_create = utils.gen_bucket_name_from_userid(
                        each_user['user_id'], rand_no=bc)
                    log.info('creating bucket with name: %s' %
                             bucket_name_to_create)
                    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
                    bucket = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Bucket',
                        'args': [bucket_name_to_create]
                    })
                    created = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'create',
                        'args': None,
                        'extra_info': {
                            'access_key': each_user['access_key']
                        }
                    })
                    if created is False:
                        raise TestExecError(
                            "Resource execution failed: bucket creation failed"
                        )
                    if created is not None:
                        response = HttpResponseParser(created)
                        if response.status_code == 200:
                            log.info('bucket created')
                        else:
                            raise TestExecError("bucket creation failed")
                    else:
                        raise TestExecError("bucket creation failed")
                    if config.test_ops['sharding']['enable'] is True:
                        cmd = 'radosgw-admin metadata get bucket:%s --cluster %s | grep bucket_id' \
                              % (bucket.name, config.cluster_name)
                        out = utils.exec_shell_cmd(cmd)
                        b_id = out.replace(
                            '"',
                            '').strip().split(":")[1].strip().replace(',', '')
                        cmd2 = 'rados -p default.rgw.buckets.index ls --cluster %s | grep %s' \
                               % (config.cluster_name, b_id)
                        out = utils.exec_shell_cmd(cmd2)
                        log.info('got output from sharding verification')
        test_info.success_status('test passed')
        sys.exit(0)
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #21
def test_exec(config):
    test_info = AddTestInfo('test swift user key gen')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    umgmt = UserMgmt()

    try:
        test_info.started_info()

        # preparing data

        user_names = ['tuffy', 'scooby', 'max']
        tenant = 'tenant'

        tenant_user_info = umgmt.create_tenant_user(
            tenant_name=tenant,
            user_id=user_names[0],
            displayname=user_names[0],
            cluster_name=config.cluster_name)

        user_info = umgmt.create_subuser(tenant_name=tenant,
                                         user_id=user_names[0],
                                         cluster_name=config.cluster_name)

        auth = Auth(user_info)

        rgw = auth.do_auth()

        for cc in range(config.container_count):

            container_name = utils.gen_bucket_name_from_userid(
                user_info['user_id'], rand_no=cc)

            container = swiftlib.resource_op({
                'obj': rgw,
                'resource': 'put_container',
                'args': [container_name]
            })

            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield")

            for oc in range(config.objects_count):

                swift_object_name = utils.gen_s3_object_name(
                    '%s.container.%s' % (user_names[0], cc), oc)

                log.info('object name: %s' % swift_object_name)

                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)

                log.info('object path: %s' % object_path)

                object_size = utils.get_file_size(
                    config.objects_size_range['min'],
                    config.objects_size_range['max'])

                data_info = manage_data.io_generator(object_path, object_size)

                # upload object

                if data_info is False:
                    TestExecError("data creation failed")

                log.info('uploading object: %s' % object_path)

                with open(object_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')

                # download object

                swift_object_download_fname = swift_object_name + ".download"

                log.info('download object name: %s' %
                         swift_object_download_fname)

                swift_object_download_path = os.path.join(
                    TEST_DATA_PATH, swift_object_download_fname)

                log.info('download object path: %s' %
                         swift_object_download_path)

                swift_object_downloaded = rgw.get_object(
                    container_name, swift_object_name)

                with open(swift_object_download_path, 'w') as fp:
                    fp.write(swift_object_downloaded[1])

                # modify and re-upload

                log.info('appending new message to test_data')

                message_to_append = 'adding new msg after download'

                with open(swift_object_download_path, 'a+') as fp:
                    fp.write(message_to_append)

                with open(swift_object_download_path, 'r') as fp:
                    rgw.put_object(container_name,
                                   swift_object_name,
                                   contents=fp.read(),
                                   content_type='text/plain')

                # delete object

                log.info('deleting swift object')

                rgw.delete_object(container_name, swift_object_name)

            # delete container

            log.info('deleting swift container')

            rgw.delete_container(container_name)

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #22
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    # create user
    user_info = s3lib.create_users(config.user_count)
    for each_user in user_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        rgw_conn2 = auth.do_auth_using_client()
        buckets = []
        buckets_meta = []
        if config.test_ops["create_bucket"]:
            log.info("no of buckets to create: %s" % config.bucket_count)
            # create bucket
            for bc in range(config.bucket_count):
                bucket_name = utils.gen_bucket_name_from_userid(
                    each_user["user_id"], rand_no=bc
                )
                bucket = reusable.create_bucket(bucket_name, rgw_conn, each_user)
                buckets.append(bucket_name)
                buckets_meta.append(bucket)
                if config.test_ops["create_object"]:
                    for oc, size in list(config.mapped_sizes.items()):
                        config.obj_size = size
                        log.info(f"s3 objects to create of size {config.obj_size}")
                        s3_object_name = config.lifecycle_conf[0]["Filter"][
                            "Prefix"
                        ] + str(oc)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        log.info(
                            f"s3 object path: {s3_object_path}, name: {s3_object_name}"
                        )
                        reusable.upload_object(
                            s3_object_name, bucket, TEST_DATA_PATH, config, each_user
                        )
                life_cycle_rule = {"Rules": config.lifecycle_conf}
                reusable.put_bucket_lifecycle(
                    bucket, rgw_conn, rgw_conn2, life_cycle_rule
                )

        log.info(f"buckets are {buckets}")

        for bkt in buckets:
            bucket_details = json.loads(
                utils.exec_shell_cmd(f"radosgw-admin bucket stats --bucket={bkt}")
            )
            num_objects = bucket_details["usage"]["rgw.main"]["num_objects"]
            log.info(f"objects count in bucket {bkt} is {num_objects}")

        lc_list_op_before = json.loads(utils.exec_shell_cmd("radosgw-admin lc list"))
        log.info(f"lc lists before lc process is {lc_list_op_before}")

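        # run lc process against the first bucket only; the other buckets'
        # lifecycle entries are expected to remain unprocessed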
        utils.exec_shell_cmd(f"radosgw-admin lc process --bucket {buckets[0]}")
        time.sleep(60)
        lc_list_op_after = json.loads(utils.exec_shell_cmd("radosgw-admin lc list"))
        log.info(f"lc lists after lc process is {lc_list_op_after}")
        completed_bucket = 0
        completed_bkt_name = ""
        for data in lc_list_op_after:
            if data["status"] == "COMPLETE":
                completed_bucket += 1
                completed_bkt_name = data["bucket"]

        bucket_details = json.loads(
            utils.exec_shell_cmd(f"radosgw-admin bucket stats --bucket={buckets[0]}")
        )
        num_objects_after = bucket_details["usage"]["rgw.main"]["num_objects"]

        if config.object_expire:
            if (
                completed_bucket == 1
                and (buckets[0] in completed_bkt_name)
                and num_objects_after == 0
            ):
                log.info(f"processing of single bucket:{buckets[0]} succeeded")
            else:
                raise TestExecError("LC Processing of a single bucket failed")
        else:
            if (
                completed_bucket == 1
                and (buckets[0] in completed_bkt_name)
                and num_objects_after == config.objects_count
            ):
                log.info(f"Successfully completed, non-expired objects did not deleted")
            else:
                raise TestExecError(
                    "Failed! removed non-expired objects from the bucket"
                )

        delete_conf = config.lifecycle_conf[0]
        delete_conf["Status"] = "Disabled"
        for bkt in buckets_meta:
            life_cycle_rule_delete = {"Rules": [delete_conf]}
            reusable.put_bucket_lifecycle(
                bkt, rgw_conn, rgw_conn2, life_cycle_rule_delete
            )
Example #23
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()

    log.info('starting IO')
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info('sharding configuration will be added now.')
    if config.sharding_type == 'dynamic':
        log.info('sharding type is dynamic')
        # for dynamic,
        # the number of shards  should be greater than   [ (no of objects)/(max objects per shard) ]
        # example: objects = 500 ; max object per shard = 10
        # then no of shards should be at least 50 or more
        time.sleep(15)
        log.info('making changes to ceph.conf')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard,
                                   str(config.max_objects_per_shard))
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding,
                                   'True')
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info('num_shards_expected: %s' % num_shards_expected)
        log.info('trying to restart services ')
        srv_restarted = rgw_service.restart()
        time.sleep(30)
        if srv_restarted is False:
            raise TestExecError("RGW service restart failed")
        else:
            log.info('RGW service restarted')

    config.bucket_count = 1
    objects_created_list = []
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'],
                                                    rand_no=1)
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    if config.test_ops.get('enable_version', False):
        log.info('enable bucket version')
        reusable.enable_versioning(bucket, rgw_conn, user_info,
                                   write_bucket_io_info)
    log.info('s3 objects to create: %s' % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
        if config.test_ops.get('enable_version', False):
            reusable.upload_version_object(config, user_info, rgw_conn,
                                           s3_object_name, config.obj_size,
                                           bucket, TEST_DATA_PATH)
        else:
            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH,
                                   config, user_info)
        objects_created_list.append((s3_object_name, s3_object_path))

    if config.sharding_type == 'manual':
        log.info('sharding type is manual')
        # for manual.
        # the number of shards will be the value set in the command.
        time.sleep(15)
        log.info('in manual sharding')
        cmd_exec = utils.exec_shell_cmd(
            'radosgw-admin bucket reshard --bucket=%s --num-shards=%s '
            '--yes-i-really-mean-it' % (bucket.name, config.shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")

    sleep_time = 600
    log.info(f'verification starts after waiting for {sleep_time} seconds')
    time.sleep(sleep_time)
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" %
                              bucket.name)
    json_doc = json.loads(op)
    bucket_id = json_doc['data']['bucket']['bucket_id']
    op2 = utils.exec_shell_cmd(
        "radosgw-admin metadata get bucket.instance:%s:%s" %
        (bucket.name, bucket_id))
    json_doc2 = json.loads(op2)
    num_shards_created = json_doc2['data']['bucket_info']['num_shards']
    log.info('no_of_shards_created: %s' % num_shards_created)
    if config.sharding_type == 'manual':
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info('Expected number of shards created')
    if config.sharding_type == 'dynamic':
        log.info('Verify if resharding list is empty')
        reshard_list_op = json.loads(
            utils.exec_shell_cmd("radosgw-admin reshard list"))
        if not reshard_list_op:
            log.info(
                'for dynamic number of shards created should be greater than or equal to number of expected shards'
            )
            log.info('no_of_shards_expected: %s' % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')
            else:
                raise TestExecError('Expected number of shards not created')
        else:
            raise TestExecError('reshard list is not empty; dynamic resharding did not complete')

    if config.test_ops.get('delete_bucket_object', False):
        if config.test_ops.get('enable_version', False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn,
                                               user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
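
# A compact helper sketch of the shard-count check used above: assuming
# utils.exec_shell_cmd returns the command's stdout (or False on failure)
# as in these tests, the created shard count can be read back in one step.
# The function name is illustrative and not part of the original library.
def get_bucket_num_shards(bucket_name):
    # the bucket entrypoint metadata resolves the current bucket_id
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket_name)
    if op is False:
        raise TestExecError("failed to fetch bucket metadata")
    bucket_id = json.loads(op)['data']['bucket']['bucket_id']
    # the shard count is stored on the bucket.instance metadata
    op2 = utils.exec_shell_cmd("radosgw-admin metadata get bucket.instance:%s:%s"
                               % (bucket_name, bucket_id))
    if op2 is False:
        raise TestExecError("failed to fetch bucket.instance metadata")
    return json.loads(op2)['data']['bucket_info']['num_shards']
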
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    write_key_io_info = KeyIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    all_users_info = s3lib.create_users(config.user_count)
    extra_user = s3lib.create_users(1)[0]
    extra_user_auth = Auth(extra_user, ssl=config.ssl)
    extra_user_conn = extra_user_auth.do_auth()
    for each_user in all_users_info:
        # authenticate
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        s3_object_names = []
        # create buckets
        log.info('no of buckets to create: %s' % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name_to_create = utils.gen_bucket_name_from_userid(each_user['user_id'], rand_no=bc)
            log.info('creating bucket with name: %s' % bucket_name_to_create)
            # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
            bucket = s3lib.resource_op({'obj': rgw_conn,
                                        'resource': 'Bucket',
                                        'args': [bucket_name_to_create]})
            # created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': each_user['access_key']})
            created = s3lib.resource_op({'obj': bucket,
                                         'resource': 'create',
                                         'args': None,
                                         'extra_info': {'access_key': each_user['access_key']}})
            if created is False:
                raise TestExecError("Resource execution failed: bucket creation faield")
            if created is not None:
                response = HttpResponseParser(created)
                if response.status_code == 200:
                    log.info('bucket created')
                else:
                    raise TestExecError("bucket creation failed")
            else:
                raise TestExecError("bucket creation failed")
            # getting bucket version object
            if config.test_ops['enable_version'] is True:
                log.info('bucket versioning test on bucket: %s' % bucket.name)
                # bucket_versioning = s3_ops.resource_op(rgw_conn, 'BucketVersioning', bucket.name)
                bucket_versioning = s3lib.resource_op({'obj': rgw_conn,
                                                       'resource': 'BucketVersioning',
                                                       'args': [bucket.name]})
                # checking the versioning status
                # version_status = s3_ops.resource_op(bucket_versioning, 'status')
                version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                    'resource': 'status',
                                                    'args': None
                                                    })
                if version_status is None:
                    log.info('bucket versioning still not enabled')
                # enabling bucket versioning
                # version_enable_status = s3_ops.resource_op(bucket_versioning, 'enable')
                version_enable_status = s3lib.resource_op({'obj': bucket_versioning,
                                                           'resource': 'enable',
                                                           'args': None,
                                                           })
                response = HttpResponseParser(version_enable_status)
                if response.status_code == 200:
                    log.info('version enabled')
                    write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                               VERSIONING_STATUS['ENABLED'])

                else:
                    raise TestExecError("version enable failed")
                if config.objects_count > 0:
                    log.info('s3 objects to create: %s' % config.objects_count)
                    for oc, s3_object_size in list(config.mapped_sizes.items()):
                        # versioning upload
                        s3_object_name = utils.gen_s3_object_name(bucket_name_to_create, str(oc))
                        s3_object_names.append(s3_object_name)
                        log.info('s3 object name: %s' % s3_object_name)
                        log.info('versioning count: %s' % config.version_count)
                        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                        original_data_info = manage_data.io_generator(s3_object_path, s3_object_size)
                        if original_data_info is False:
                            TestExecError("data creation failed")
                        created_versions_count = 0
                        for vc in range(config.version_count):
                            log.info('version count for %s is %s' % (s3_object_name, str(vc)))
                            log.info('modifying data: %s' % s3_object_name)
                            modified_data_info = manage_data.io_generator(s3_object_path, s3_object_size,
                                                                          op='append',
                                                                          **{'message': '\nhello for version: %s\n'
                                                                                        % str(vc)})
                            if modified_data_info is False:
                                TestExecError("data modification failed")
                            log.info('uploading s3 object: %s' % s3_object_path)
                            upload_info = dict({'access_key': each_user['access_key'],
                                                'versioning_status': VERSIONING_STATUS['ENABLED'],
                                                'version_count_no': vc}, **modified_data_info)
                            s3_obj = s3lib.resource_op({'obj': bucket,
                                                        'resource': 'Object',
                                                        'args': [s3_object_name],
                                                        'extra_info': upload_info, })
                            object_uploaded_status = s3lib.resource_op({'obj': s3_obj,
                                                                        'resource': 'upload_file',
                                                                        'args': [modified_data_info['name']],
                                                                        'extra_info': upload_info})
                            if object_uploaded_status is False:
                                raise TestExecError("Resource execution failed: object upload failed")
                            if object_uploaded_status is None:
                                log.info('object uploaded')
                                s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                                log.info('current_version_id: %s' % s3_obj.version_id)
                                key_version_info = basic_io_structure.version_info(
                                    **{'version_id': s3_obj.version_id,
                                       'md5_local': upload_info['md5'],
                                       'count_no': vc,
                                       'size': upload_info['size']})
                                log.info('key_version_info: %s' % key_version_info)
                                write_key_io_info.add_versioning_info(each_user['access_key'], bucket.name,
                                                                      s3_object_path, key_version_info)
                                created_versions_count += 1
                                log.info('created_versions_count: %s' % created_versions_count)
                                log.info('adding metadata')
                                metadata1 = {"m_data1": "this is the meta1 for this obj"}
                                s3_obj.metadata.update(metadata1)
                                metadata2 = {"m_data2": "this is the meta2 for this obj"}
                                s3_obj.metadata.update(metadata2)
                                log.info('metadata for this object: %s' % s3_obj.metadata)
                                log.info('metadata count for object: %s' % (len(s3_obj.metadata)))
                                if not s3_obj.metadata:
                                    raise TestExecError('metadata not set even after adding metadata')
                                versions = bucket.object_versions.filter(Prefix=s3_object_name)
                                created_versions_count_from_s3 = len([v.version_id for v in versions])
                                log.info('created versions count on s3: %s' % created_versions_count_from_s3)
                                if created_versions_count == created_versions_count_from_s3:
                                    log.info('no new versions were created when metadata was added')
                                else:
                                    raise TestExecError("version count mismatch, "
                                                        "possible creation of version on adding metadata")
                            s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download")
                            object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                          'resource': 'download_file',
                                                                          'args': [s3_object_name,
                                                                                   s3_object_download_path],
                                                                          })
                            if object_downloaded_status is False:
                                raise TestExecError("Resource execution failed: object download failed")
                            if object_downloaded_status is None:
                                log.info('object downloaded')
                            # checking md5 of the downloaded file
                            s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                            log.info('downloaded_md5: %s' % s3_object_downloaded_md5)
                            log.info('uploaded_md5: %s' % modified_data_info['md5'])
                            if s3_object_downloaded_md5 != modified_data_info['md5']:
                                raise TestExecError("md5 mismatch between uploaded and downloaded object")
                            # tail_op = utils.exec_shell_cmd('tail -l %s' % s3_object_download_path)
                        log.info('all versions for the object: %s\n' % s3_object_name)
                        versions = bucket.object_versions.filter(Prefix=s3_object_name)
                        for version in versions:
                            log.info('key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                        if config.test_ops.get('set_acl', None) is True:
                            s3_obj_acl = s3lib.resource_op({'obj': rgw_conn,
                                                            'resource': 'ObjectAcl',
                                                            'args': [bucket.name, s3_object_name]})
                            # setting acl to private, just need to set to any acl and
                            # check if its set - check by response code
                            acls_set_status = s3_obj_acl.put(ACL='private')
                            response = HttpResponseParser(acls_set_status)
                            if response.status_code == 200:
                                log.info('ACLs set')
                            else:
                                raise TestExecError("Acls not Set")
                            # get obj details based on version id
                            for version in versions:
                                log.info('getting info for version id: %s' % version.version_id)
                                obj = s3lib.resource_op({'obj': rgw_conn,
                                                         'resource': 'Object',
                                                         'args': [bucket.name, s3_object_name]})
                                log.info('obj get details: %s\n' % (obj.get(VersionId=version.version_id)))
                        if config.test_ops['copy_to_version'] is True:
                            # reverting object to one of the versions ( randomly chosen )
                            version_id_to_copy = random.choice([v.version_id for v in versions])
                            log.info('version_id_to_copy: %s' % version_id_to_copy)
                            s3_obj = rgw_conn.Object(bucket.name, s3_object_name)
                            log.info('current version_id: %s' % s3_obj.version_id)
                            copy_response = s3_obj.copy_from(CopySource={'Bucket': bucket.name,
                                                                         'Key': s3_object_name,
                                                                         'VersionId': version_id_to_copy})
                            log.info('copy_response: %s' % copy_response)
                            if copy_response is None:
                                raise TestExecError("copy object from version id failed")
                            # current_version_id = copy_response['VersionID']
                            log.info('current_version_id: %s' % s3_obj.version_id)
                            # delete the version_id_to_copy object
                            s3_obj.delete(VersionId=version_id_to_copy)
                            log.info('all versions for the object after the copy operation: %s\n' % s3_object_name)
                            for version in versions:
                                log.info(
                                    'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                            # log.info('downloading current s3object: %s' % s3_object_name)
                            # s3_obj.download_file(s3_object_name + ".download")
                        if config.test_ops['delete_object_versions'] is True:
                            log.info('deleting s3_obj keys and its versions')
                            s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, s3_object_name]})
                            log.info('deleting versions for s3 obj: %s' % s3_object_name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not None:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        write_key_io_info.delete_version_info(each_user['access_key'], bucket.name,
                                                                              s3_object_path, version.version_id)
                                    else:
                                        raise TestExecError("version  deletion failed")
                                else:
                                    raise TestExecError("version deletion failed")
                            log.info('available versions for the object')
                            versions = bucket.object_versions.filter(Prefix=s3_object_name)
                            for version in versions:
                                log.info('key_name: %s --> version_id: %s' % (
                                    version.object_key, version.version_id))
                        if config.test_ops.get('delete_from_extra_user') is True:
                            log.info('trying to delete objects from extra user')
                            s3_obj = s3lib.resource_op({'obj': extra_user_conn,
                                                        'resource': 'Object',
                                                        'args': [bucket.name, s3_object_name]})
                            log.info('deleting versions for s3 obj: %s' % s3_object_name)
                            for version in versions:
                                log.info('trying to delete obj version: %s' % version.version_id)
                                del_obj_version = s3lib.resource_op({'obj': s3_obj,
                                                                     'resource': 'delete',
                                                                     'kwargs': dict(
                                                                         VersionId=version.version_id)})
                                log.info('response:\n%s' % del_obj_version)
                                if del_obj_version is not False:
                                    response = HttpResponseParser(del_obj_version)
                                    if response.status_code == 204:
                                        log.info('version deleted ')
                                        write_key_io_info.delete_version_info(each_user['access_key'],
                                                                              bucket.name,
                                                                              s3_object_path,
                                                                              version.version_id)
                                        raise TestExecError("version and deleted, this should not happen")
                                    else:
                                        log.info('version did not delete, expected behaviour')
                                else:
                                    log.info('version did not delete, expected behaviour')
                        if config.local_file_delete is True:
                            log.info('deleting local file')
                            utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path)
                if config.test_ops['suspend_version'] is True:
                    log.info('suspending versioning')
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')
                    suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                'resource': 'suspend',
                                                                'args': None})
                    response = HttpResponseParser(suspend_version_status)
                    if response.status_code == 200:
                        log.info('versioning suspended')
                        write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                                   VERSIONING_STATUS['SUSPENDED'])
                    else:
                        raise TestExecError("version suspend failed")
                    # getting all objects in the bucket
                    log.info('getting all objects in the bucket')
                    objects = s3lib.resource_op({'obj': bucket,
                                                 'resource': 'objects',
                                                 'args': None})
                    log.info('objects :%s' % objects)
                    all_objects = s3lib.resource_op({'obj': objects,
                                                     'resource': 'all',
                                                     'args': None})
                    log.info('all objects: %s' % all_objects)
                    log.info('all objects2 :%s ' % bucket.objects.all())
                    for obj in all_objects:
                        log.info('object_name: %s' % obj.key)
                        versions = bucket.object_versions.filter(Prefix=obj.key)
                        log.info('displaying all versions of the object')
                        for version in versions:
                            log.info(
                                'key_name: %s --> version_id: %s' % (version.object_key, version.version_id))
                if config.test_ops.get('suspend_from_extra_user') is True:
                    log.info('suspending versioning from extra user')
                    # suspend_version_status = s3_ops.resource_op(bucket_versioning, 'suspend')

                    bucket_versioning = s3lib.resource_op({'obj': extra_user_conn,
                                                           'resource': 'BucketVersioning',
                                                           'args': [bucket.name]})

                    suspend_version_status = s3lib.resource_op({'obj': bucket_versioning,
                                                                'resource': 'suspend',
                                                                'args': None})
                    if suspend_version_status is not False:
                        response = HttpResponseParser(suspend_version_status)
                        if response.status_code == 200:
                            log.info('versioning suspended')
                            write_bucket_io_info.add_versioning_status(each_user['access_key'], bucket.name,
                                                                       VERSIONING_STATUS['SUSPENDED'])
                            raise TestExecError('versioning suspended by a different user, this should not happen')
                    else:
                        log.info('versioning not suspended, expected behaviour')
            if config.test_ops.get('upload_after_suspend') is True:
                log.info('trying to upload after suspending versioning on bucket')
                for oc, s3_object_size in list(config.mapped_sizes.items()):
                    # non versioning upload
                    s3_object_name = s3_object_names[oc] + ".after_version_suspending"
                    log.info('s3 object name: %s' % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    non_version_data_info = manage_data.io_generator(s3_object_path, s3_object_size, op="append",
                                                                     **{
                                                                         'message': '\nhello for non version\n'})
                    if non_version_data_info is False:
                        TestExecError("data creation failed")
                    log.info('uploading s3 object: %s' % s3_object_path)
                    upload_info = dict({'access_key': each_user['access_key'],
                                        'versioning_status': 'suspended'}, **non_version_data_info)
                    s3_obj = s3lib.resource_op({'obj': bucket,
                                                'resource': 'Object',
                                                'args': [s3_object_name],
                                                'extra_info': upload_info})
                    object_uploaded_status = s3lib.resource_op({'obj': s3_obj,
                                                                'resource': 'upload_file',
                                                                'args': [non_version_data_info['name']],
                                                                'extra_info': upload_info})

                    if object_uploaded_status is False:
                        raise TestExecError("Resource execution failed: object upload failed")
                    if object_uploaded_status is None:
                        log.info('object uploaded')
                    s3_obj = s3lib.resource_op({'obj': rgw_conn,
                                                'resource': 'Object',
                                                'args': [bucket.name, s3_object_name]})
                    log.info('version_id: %s' % s3_obj.version_id)
                    if s3_obj.version_id is None:
                        log.info('Versions are not created after suspending')
                    else:
                        raise TestExecError('Versions are created even after suspending')
                    s3_object_download_path = os.path.join(TEST_DATA_PATH, s3_object_name + ".download")
                    object_downloaded_status = s3lib.resource_op({'obj': bucket,
                                                                  'resource': 'download_file',
                                                                  'args': [s3_object_name,
                                                                           s3_object_download_path],
                                                                  })
                    if object_downloaded_status is False:
                        raise TestExecError("Resource execution failed: object download failed")
                    if object_downloaded_status is None:
                        log.info('object downloaded')
                    # checking md5 of the downloaded file
                    s3_object_downloaded_md5 = utils.get_md5(s3_object_download_path)
                    log.info('s3_object_downloaded_md5: %s' % s3_object_downloaded_md5)
                    log.info('s3_object_uploaded_md5: %s' % non_version_data_info['md5'])
                    if s3_object_downloaded_md5 != non_version_data_info['md5']:
                        raise TestExecError("md5 mismatch between uploaded and downloaded object")
                    if config.local_file_delete is True:
                        utils.exec_shell_cmd('sudo rm -rf %s' % s3_object_path)
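
# For reference, the enable/suspend flow exercised above maps onto the plain
# boto3 resource API that s3lib.resource_op wraps in these tests; a minimal
# sketch, assuming rgw_conn is a boto3 S3 ServiceResource.
def toggle_versioning(rgw_conn, bucket_name, enable=True):
    bucket_versioning = rgw_conn.BucketVersioning(bucket_name)
    if enable:
        bucket_versioning.enable()   # PUT ?versioning with Status=Enabled
    else:
        bucket_versioning.suspend()  # PUT ?versioning with Status=Suspended
    # status is 'Enabled', 'Suspended', or None if never configured
    return bucket_versioning.status
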
def test_exec(config):
    test_info = AddTestInfo('test bucket policy')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # preparing data
        user_names = ['user1', 'user2', 'user3']
        bucket_names = ['bucket1', 'bucket2', 'bucket3']
        object_names = ['o1', 'o2']
        tenant1 = 'tenant1'
        tenant2 = 'tenant2'
        t1_u1_info = create_tenant_user(tenant_name=tenant1,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t1_u1_auth = Auth(t1_u1_info)
        t1_u1 = t1_u1_auth.do_auth()
        t2_u1_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[0],
                                        cluster_name=config.cluster_name)
        t2_u1_auth = Auth(t2_u1_info)
        t2_u1 = t2_u1_auth.do_auth()
        t1_u1_b1 = resuables.create_bucket(bucket_name=bucket_names[0],
                                           rgw=t1_u1,
                                           user_info=t1_u1_info)
        t2_u1_b1 = resuables.create_bucket(bucket_name=bucket_names[0],
                                           rgw=t2_u1,
                                           user_info=t2_u1_info)
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t1_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t1_u1_info)
        resuables.upload_object(s3_object_name=object_names[0],
                                bucket=t2_u1_b1,
                                TEST_DATA_PATH=TEST_DATA_PATH,
                                config=config,
                                user_info=t2_u1_info)
        t2_u2_info = create_tenant_user(tenant_name=tenant2,
                                        user_id=user_names[1],
                                        cluster_name=config.cluster_name)
        t2_u2_auth = Auth(t2_u2_info)
        t2_u2 = t2_u2_auth.do_auth()
        # will try to access the bucket and objects in both tenants
        # access t1_u1_b1
        log.info('trying to access tenant1->user1->bucket1')
        t1_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [bucket_names[0]]
        })
        log.info(
            'trying to download tenant1->user1->bucket1->object1 from tenant2->user2'
        )
        download_path1 = TEST_DATA_PATH + "/t1_u1_b1_%s.download" % object_names[0]
        t1_u1_b1_o1_download = s3lib.resource_op({
            'obj': t1_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path1]
        })
        if t1_u1_b1_o1_download is False:
            log.info('object not downloaded, worked as expected\n')
        if t1_u1_b1_o1_download is None:
            raise TestExecError(
                "object downloaded for tenant1->user1->bucket1->object1, this should not happen"
            )
        log.info(
            'trying to access tenant2->user1->bucket1 from user2 in tenant 2')
        t2_u1_b1_from_t2_u2 = s3lib.resource_op({
            'obj': t2_u2,
            'resource': 'Bucket',
            'args': [bucket_names[0]]
        })
        log.info(
            'trying to download tenant2->user1->bucket1->object1 from tenant2->user2'
        )
        download_path2 = TEST_DATA_PATH + "/t2_u1_b1_%s.download" % object_names[0]
        t2_u1_b1_o1_download = s3lib.resource_op({
            'obj': t2_u1_b1_from_t2_u2,
            'resource': 'download_file',
            'args': [object_names[0], download_path2]
        })
        if t2_u1_b1_o1_download is False:
            log.info('object did not download, worked as expected')
        if t2_u1_b1_o1_download is None:
            raise TestExecError(
                'object downloaded\n'
                'downloaded tenant2->user1->bucket1->object1, this should not happen'
            )
        test_info.success_status('test passed')
        sys.exit(0)
    # TestExecError is handled first so the generic handler does not shadow it
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
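
# The checks above rely on RGW's per-tenant bucket namespaces: each tenant can
# own its own "bucket1", and a plain bucket name only resolves within the
# caller's tenant. A one-line sketch of addressing another tenant's bucket
# explicitly with the "tenant:bucket" form RGW accepts (the caller still needs
# to be granted access, e.g. via a bucket policy).
def tenant_qualified_bucket(rgw_conn, tenant, bucket_name):
    return rgw_conn.Bucket('%s:%s' % (tenant, bucket_name))
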
def test_exec(config):
    test_info = AddTestInfo("test swift user key gen")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    try:
        test_info.started_info()
        # preparing data
        user_names = ["tuffy", "scooby", "max"]
        tenant1 = "tenant"
        cmd = (
            'radosgw-admin user create --uid=%s --display-name="%s" --tenant=%s --cluster %s'
            % (user_names[0], user_names[0], tenant1, config.cluster_name)
        )
        out = utils.exec_shell_cmd(cmd)
        if out is False:
            raise TestExecError("RGW User creation error")
        log.info("output :%s" % out)
        v1_as_json = json.loads(out)
        log.info("creted user_id: %s" % v1_as_json["user_id"])
        cmd2 = (
            "radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s"
            % (tenant1, user_names[0], user_names[0], tenant1, config.cluster_name)
        )
        out2 = utils.exec_shell_cmd(cmd2)
        if out2 is False:
            raise TestExecError("sub-user creation error")
        v2_as_json = json.loads(out2)
        log.info("created subuser: %s" % v2_as_json["subusers"][0]["id"])
        cmd3 = (
            "radosgw-admin key create --subuser=%s:swift --uid=%s$%s --tenant=%s --key-type=swift --gen-secret "
            "--cluster %s"
            % (user_names[0], user_names[0], tenant1, tenant1, config.cluster_name)
        )
        out3 = utils.exec_shell_cmd(cmd3)
        if out3 is False:
            raise TestExecError("secret_key gen error")
        v3_as_json = json.loads(out3)
        log.info(
            "created subuser: %s\nsecret_key generated: %s"
            % (
                v3_as_json["swift_keys"][0]["user"],
                v3_as_json["swift_keys"][0]["secret_key"],
            )
        )
        user_info = {
            "user_id": v3_as_json["swift_keys"][0]["user"],
            "key": v3_as_json["swift_keys"][0]["secret_key"],
        }
        auth = Auth(user_info)
        rgw = auth.do_auth()
        for cc in range(config.container_count):
            container_name = utils.gen_bucket_name_from_userid(
                user_info["user_id"], rand_no=cc
            )
            container = swiftlib.resource_op(
                {"obj": rgw, "resource": "put_container", "args": [container_name]}
            )
            if container is False:
                raise TestExecError(
                    "Resource execution failed: container creation faield"
                )
            for oc in range(config.objects_count):
                swift_object_name = utils.gen_s3_object_name(
                    "%s.container.%s" % (user_names[0], cc), oc
                )
                log.info("object name: %s" % swift_object_name)
                object_path = os.path.join(TEST_DATA_PATH, swift_object_name)
                log.info("object path: %s" % object_path)
                object_size = utils.get_file_size(
                    config.objects_size_range["min"], config.objects_size_range["max"]
                )
                data_info = manage_data.io_generator(object_path, object_size)
                if data_info is False:
                    TestExecError("data creation failed")
                log.info("uploading object: %s" % object_path)
                with open(object_path, "r") as fp:
                    rgw.put_object(
                        container_name,
                        swift_object_name,
                        contents=fp.read(),
                        content_type="text/plain",
                    )
        test_info.success_status("test passed")
        sys.exit(0)
    # TestExecError is handled first so the generic handler does not shadow it
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
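
# The Auth(user_info).do_auth() call above presumably wraps python-swiftclient;
# a minimal equivalent sketch under that assumption. The host, port and the
# /auth/1.0 path below are placeholders for the RGW swift endpoint.
import swiftclient

def swift_connect(subuser_id, secret_key, rgw_host='rgw.example.com', port=8080):
    return swiftclient.Connection(
        authurl='http://%s:%s/auth/1.0' % (rgw_host, port),
        user=subuser_id,  # e.g. 'tenant$tuffy:swift'
        key=secret_key)
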
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_config_set = CephConfOp()
    rgw_service = RGWService()

    if config.sts is None:
        raise TestExecError("sts policies are missing in yaml config")

    # create users
    config.user_count = 2
    users_info = s3lib.create_users(config.user_count)
    # user1 is the owner
    user1, user2 = users_info[0], users_info[1]
    log.info("adding sts config to ceph.conf")
    sesison_encryption_token = "abcdefghijklmnoq"
    ceph_config_set.set_to_ceph_conf(
        "global", ConfigOpts.rgw_sts_key, sesison_encryption_token
    )
    ceph_config_set.set_to_ceph_conf("global", ConfigOpts.rgw_s3_auth_use_sts, "True")
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info("RGW service restarted")

    auth = Auth(user1, ssl=config.ssl)
    iam_client = auth.do_auth_iam_client()

    policy_document = json.dumps(config.sts["policy_document"]).replace(" ", "")
    policy_document = policy_document.replace("<user_name>", user2["user_id"])

    role_policy = json.dumps(config.sts["role_policy"]).replace(" ", "")

    add_caps_cmd = (
        'sudo radosgw-admin caps add --uid="{user_id}" --caps="roles=*"'.format(
            user_id=user1["user_id"]
        )
    )
    utils.exec_shell_cmd(add_caps_cmd)

    role_name = f"S3RoleOf.{user1['user_id']}"
    log.info(f"role_name: {role_name}")

    log.info("creating role")
    create_role_response = iam_client.create_role(
        AssumeRolePolicyDocument=policy_document,
        Path="/",
        RoleName=role_name,
    )
    log.info("create_role_response")
    log.info(create_role_response)

    policy_name = f"policy.{user1['user_id']}"
    log.info(f"policy_name: {policy_name}")

    log.info("putting role policy")
    put_policy_response = iam_client.put_role_policy(
        RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy
    )

    log.info("put_policy_response")
    log.info(put_policy_response)

    auth = Auth(user2, ssl=config.ssl)
    sts_client = auth.do_auth_sts_client()

    log.info("assuming role")
    assume_role_response = sts_client.assume_role(
        RoleArn=create_role_response["Role"]["Arn"],
        RoleSessionName=user1["user_id"],
        DurationSeconds=3600,
    )

    log.info(assume_role_response)

    assumed_role_user_info = {
        "access_key": assume_role_response["Credentials"]["AccessKeyId"],
        "secret_key": assume_role_response["Credentials"]["SecretAccessKey"],
        "session_token": assume_role_response["Credentials"]["SessionToken"],
        "user_id": user2["user_id"],
    }

    log.info("got the credentials after assume role")
    s3client = Auth(assumed_role_user_info, ssl=config.ssl)
    s3_client_rgw = s3client.do_auth()

    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()
    basic_io_structure = BasicIOInfoStructure()
    user_info = basic_io_structure.user(
        **{
            "user_id": assumed_role_user_info["user_id"],
            "access_key": assumed_role_user_info["access_key"],
            "secret_key": assumed_role_user_info["secret_key"],
        }
    )
    write_user_info.add_user_info(user_info)

    buckets_created = []

    if config.test_ops["create_bucket"] is True:
        log.info("no of buckets to create: %s" % config.bucket_count)
        for bc in range(config.bucket_count):
            bucket_name = utils.gen_bucket_name_from_userid(
                assumed_role_user_info["user_id"], rand_no=bc
            )
            log.info("creating bucket with name: %s" % bucket_name)
            bucket = reusable.create_bucket(
                bucket_name, s3_client_rgw, assumed_role_user_info
            )
            buckets_created.append(bucket)

        if config.test_ops["create_object"] is True:
            for bucket in buckets_created:
                # uploading data
                log.info("s3 objects to create: %s" % config.objects_count)
                for oc, size in list(config.mapped_sizes.items()):
                    config.obj_size = size
                    s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
                    log.info("s3 object name: %s" % s3_object_name)
                    s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
                    log.info("s3 object path: %s" % s3_object_path)
                    if config.test_ops.get("upload_type") == "multipart":
                        log.info("upload type: multipart")
                        reusable.upload_mutipart_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )
                    else:
                        log.info("upload type: normal")
                        reusable.upload_object(
                            s3_object_name,
                            bucket,
                            TEST_DATA_PATH,
                            config,
                            assumed_role_user_info,
                        )

    if config.test_ops["server_side_copy"] is True:
        bucket1, bucket2 = buckets_created

        # copy object1 from bucket1 to bucket2 with the same name as in bucket1
        log.info("copying first object from bucket1 to bucket2")
        all_keys_in_buck1 = []
        for obj in bucket1.objects.all():
            all_keys_in_buck1.append(obj.key)
        copy_source = {"Bucket": bucket1.name, "Key": all_keys_in_buck1[0]}
        copy_object_name = all_keys_in_buck1[0] + "_copied_obj"
        log.info(f"copy object name: {copy_object_name}")
        bucket2.copy(copy_source, copy_object_name)
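        # note: boto3's Bucket.copy drives a managed server-side copy
        # (CopyObject, switching to multipart copy for large objects),
        # so the object data never leaves the cluster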

        # list the objects in bucket2
        log.info("listing all objects im bucket2 after copy")
        all_bucket2_objs = []
        for obj in bucket2.objects.all():
            log.info(obj.key)
            all_bucket2_objs.append(obj.key)

        # check for object existence in bucket2
        if copy_object_name in all_bucket2_objs:
            log.info("server side copy successful")
        else:
            raise TestExecError("server side copy operation was not successful")

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")
import os, sys, glob
import json
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
import v2.lib.resource_op as s3lib
import v2.utils.log as log
import v2.utils.utils as utils
from v2.utils.utils import HttpResponseParser
from v2.lib.exceptions import TestExecError
import v2.lib.manage_data as manage_data
from v2.lib.s3.write_io_info import IOInfoInitialize, BasicIOInfoStructure, BucketIoInfo, KeyIoInfo

io_info_initialize = IOInfoInitialize()
basic_io_structure = BasicIOInfoStructure()
write_bucket_io_info = BucketIoInfo()
write_key_io_info = KeyIoInfo()


def create_bucket(bucket_name, rgw, user_info):
    log.info('creating bucket with name: %s' % bucket_name)
    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    bucket = s3lib.resource_op({
        'obj': rgw,
        'resource': 'Bucket',
        'args': [bucket_name]
    })
    created = s3lib.resource_op({
        'obj': bucket,
        'resource': 'create',
        'args': None,
        'extra_info': {
            'access_key': user_info['access_key']
        }
    })
    if created is False:
        raise TestExecError("Resource execution failed: bucket creation failed")
    if created is not None:
        response = HttpResponseParser(created)
        if response.status_code == 200:
            log.info('bucket created')
        else:
            raise TestExecError("bucket creation failed")
    else:
        raise TestExecError("bucket creation failed")
    return bucket

def test_exec(rgw_user_info_file, config):

    test_info = AddTestInfo('Test Basic IO on S3')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    write_user_info = AddUserInfo()

    try:

        test_info.started_info()

        with open(rgw_user_info_file, 'r') as f:
            rgw_user_info = yaml.safe_load(f)

        mount_point = rgw_user_info['nfs_mnt_point']

        nfs_ganesha = PrepNFSGanesha(rgw_user_info_file=rgw_user_info_file)

        mounted = nfs_ganesha.initialize(write_io_info=False)

        if mounted is False:
            raise TestExecError("mount failed")

        if (nfs_ganesha.rgw_user_info['nfs_version'] == 4
                and nfs_ganesha.rgw_user_info['Pseudo'] is not None):
            log.info('nfs version: 4')
            log.info('adding Pseudo path to writable mount point')
            mount_point = os.path.join(mount_point,
                                       nfs_ganesha.rgw_user_info['Pseudo'])
            log.info('writable mount point with Pseudo: %s' % mount_point)

        log.info('authenticating rgw user')

        # authenticate

        auth = Auth(rgw_user_info)
        rgw_conn = auth.do_auth()

        # add user_info io_info yaml file

        user_info_add = basic_io_structure.user(**rgw_user_info)
        write_user_info.add_user_info(user_info_add)

        if config.io_op_config.get('create', None) is True:

            # create buckets

            for bc in range(config.bucket_count):

                bucket_name_to_create = utils.gen_bucket_name_from_userid(
                    rgw_user_info['user_id'], rand_no=bc)

                bucket = s3_reusables.create_bucket(bucket_name_to_create,
                                                    rgw_conn, rgw_user_info)

                # uploading data

                log.info('s3 objects to create: %s' % config.objects_count)

                for oc in range(config.objects_count):

                    s3_object_name = utils.gen_s3_object_name(
                        bucket_name_to_create, oc)

                    s3_reusables.upload_object(s3_object_name, bucket,
                                               TEST_DATA_PATH, config,
                                               rgw_user_info)

            log.info('verification Starts on NFS mount after %s seconds' %
                     SLEEP_TIME)

            time.sleep(SLEEP_TIME)

            read_io_info_on_nfs = ReadIOInfoOnNFS(mount_point)
            read_io_info_on_nfs.yaml_fname = 'io_info.yaml'
            read_io_info_on_nfs.initialize_verify_io()
            read_io_info_on_nfs.verify_if_basedir_created()
            read_io_info_on_nfs.verify_if_files_created()

            log.info('verification complete, data intact')

            created_buckets = read_io_info_on_nfs.base_dirs
            created_objects = read_io_info_on_nfs.files

            if config.io_op_config.get('delete', None) is True:

                log.info('delete operation starts')

                for bucket_name in created_buckets:

                    bucket = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Bucket',
                        'args': [os.path.basename(bucket_name)]
                    })  # buckets are base dirs in NFS

                    objects = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'objects',
                        'args': None
                    })

                    log.info('deleting all objects in bucket')

                    objects_deleted = s3lib.resource_op({
                        'obj': objects,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('objects_deleted: %s' % objects_deleted)

                    if objects_deleted is False:
                        raise TestExecError(
                            'Resource execution failed: Object deletion failed'
                        )

                    if objects_deleted is not None:

                        response = HttpResponseParser(objects_deleted[0])

                        if response.status_code == 200:
                            log.info('objects deleted ')

                        else:
                            raise TestExecError("objects deletion failed")

                    else:
                        raise TestExecError("objects deletion failed")

                    log.info('deleting bucket: %s' % bucket.name)

                    bucket_deleted_status = s3lib.resource_op({
                        'obj': bucket,
                        'resource': 'delete',
                        'args': None
                    })

                    log.info('bucket_deleted_status: %s' %
                             bucket_deleted_status)

                    if bucket_deleted_status is not None:

                        response = HttpResponseParser(bucket_deleted_status)

                        if response.status_code == 204:
                            log.info('bucket deleted ')

                        else:
                            raise TestExecError("bucket deletion failed")

                    else:
                        raise TestExecError("bucket deletion failed")

                log.info(
                    'verification on NFS will start after %s seconds for delete operation'
                    % SLEEP_TIME)

                time.sleep(SLEEP_TIME)

                for basedir in created_buckets:

                    exists = os.path.exists(basedir)

                    log.info('exists status: %s' % exists)

                    if exists is True:
                        raise TestExecError(
                            "Basedir: %s not deleted on NFS" % basedir)

                log.info('basedirs deleted')

                for each_file in created_objects:

                    log.info('verifying existence for: %s' % each_file['file'])

                    exists = os.path.exists(each_file['file'])

                    if exists:
                        raise TestExecError(
                            "file: %s not deleted on NFS" % each_file['file'])

                    log.info('file deleted')

                log.info(
                    'verification of files complete, files deleted on NFS'
                )

            if config.io_op_config.get('move', None) is True:

                log.info('move operation starts')

                for each_file in created_objects:

                    # in s3 move operation is achieved by copying the same object with the new name and
                    #  deleting the old object

                    log.info('move operation for :%s' % each_file['file'])

                    new_obj_name = os.path.basename(each_file['file']) + ".moved"

                    log.info('new file name: %s' % new_obj_name)

                    new_object = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Object',
                        'args': [each_file['bucket'], new_obj_name],
                    })

                    # copy to the new name, then delete the old object
                    new_object.copy_from(CopySource='%s/%s' % (
                        each_file['bucket'],
                        os.path.basename(each_file['file'])))

                    old_object = s3lib.resource_op({
                        'obj': rgw_conn,
                        'resource': 'Object',
                        'args': [each_file['bucket'],
                                 os.path.basename(each_file['file'])],
                    })
                    old_object.delete()

                    each_file['file'] = os.path.abspath(
                        os.path.join(mount_point, each_file['bucket'],
                                     new_obj_name))

                log.info(
                    'verification on NFS for move operation will start after %s seconds'
                    % SLEEP_TIME)
                time.sleep(SLEEP_TIME)

                read_io_info_on_nfs.verify_if_files_created()

                log.info('move completed, data intact')

        test_info.success_status('test passed')

        sys.exit(0)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
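
# S3 has no rename: the move step in the test above copies the object to its
# new key and deletes the old one. The same pattern with the plain boto3
# resource API, as a sketch assuming rgw_conn is a boto3 S3 ServiceResource.
def s3_rename(rgw_conn, bucket_name, old_key, new_key):
    new_obj = rgw_conn.Object(bucket_name, new_key)
    new_obj.copy_from(CopySource='%s/%s' % (bucket_name, old_key))
    rgw_conn.Object(bucket_name, old_key).delete()
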
Example #30
def test_exec(config):

    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    # create user
    config.user_count = 1
    tenant1 = "MountEverest"
    tenant2 = "Himalayas"
    tenant1_user_info = s3lib.create_tenant_users(
        tenant_name=tenant1, no_of_users_to_create=config.user_count)
    tenant1_user1_info = tenant1_user_info[0]
    tenant2_user_info = s3lib.create_tenant_users(
        tenant_name=tenant2, no_of_users_to_create=config.user_count)
    tenant2_user1_info = tenant2_user_info[0]
    tenant1_user1_auth = Auth(tenant1_user1_info, ssl=config.ssl)
    tenant2_user1_auth = Auth(tenant2_user1_info, ssl=config.ssl)
    rgw_tenant1_user1 = tenant1_user1_auth.do_auth()
    rgw_tenant1_user1_c = tenant1_user1_auth.do_auth_using_client()
    rgw_tenant2_user1 = tenant2_user1_auth.do_auth()
    rgw_tenant2_user1_c = tenant2_user1_auth.do_auth_using_client()
    bucket_name1 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=1)
    t1_u1_bucket1 = reusable.create_bucket(
        bucket_name1,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_name2 = utils.gen_bucket_name_from_userid(
        tenant1_user1_info["user_id"], rand_no=2)
    t1_u1_bucket2 = reusable.create_bucket(
        bucket_name2,
        rgw_tenant1_user1,
        tenant1_user1_info,
    )
    bucket_policy_generated = s3_bucket_policy.gen_bucket_policy(
        tenants_list=[tenant1],
        userids_list=[tenant2_user1_info["user_id"]],
        actions_list=["CreateBucket"],
        resources=[t1_u1_bucket1.name],
    )
    bucket_policy = json.dumps(bucket_policy_generated)
    log.info("jsoned policy:%s\n" % bucket_policy)
    log.info("bucket_policy_generated:%s\n" % bucket_policy_generated)
    bucket_policy_obj = s3lib.resource_op({
        "obj": rgw_tenant1_user1,
        "resource": "BucketPolicy",
        "args": [t1_u1_bucket1.name],
    })
    put_policy = s3lib.resource_op({
        "obj": bucket_policy_obj,
        "resource": "put",
        "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy),
    })
    log.info("put policy response:%s\n" % put_policy)
    if put_policy is False:
        raise TestExecError(
            "Resource execution failed: bucket policy creation failed")
    if put_policy is not None:
        response = HttpResponseParser(put_policy)
        if response.status_code == 200 or response.status_code == 204:
            log.info("bucket policy created")
        else:
            raise TestExecError("bucket policy creation failed")
    else:
        raise TestExecError("bucket policy creation failed")
    # get policy
    get_policy = rgw_tenant1_user1_c.get_bucket_policy(
        Bucket=t1_u1_bucket1.name)
    log.info("got bucket policy:%s\n" % get_policy["Policy"])
    # modifying bucket policy to take new policy
    if config.bucket_policy_op == "modify":
        # adding new action list: ListBucket to existing action: CreateBucket
        log.info("modifying buckey policy")
        actions_list = ["ListBucket", "CreateBucket"]
        actions = list(map(s3_bucket_policy.gen_action, actions_list))
        bucket_policy2_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=actions_list,
            resources=[t1_u1_bucket1.name],
        )
        bucket_policy2 = json.dumps(bucket_policy2_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=bucket_policy2),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket policy modification failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
        get_modified_policy = rgw_tenant1_user1_c.get_bucket_policy(
            Bucket=t1_u1_bucket1.name)
        modified_policy = json.loads(get_modified_policy["Policy"])
        log.info("got bucket policy:%s\n" % modified_policy)
        actions_list_from_modified_policy = modified_policy["Statement"][0][
            "Action"]
        cleaned_actions_list_from_modified_policy = list(
            map(str, actions_list_from_modified_policy))
        log.info("cleaned_actions_list_from_modified_policy: %s" %
                 cleaned_actions_list_from_modified_policy)
        log.info("actions list to be modified: %s" % actions)
        cmp_val = utils.cmp(actions, cleaned_actions_list_from_modified_policy)
        log.info("cmp_val: %s" % cmp_val)
        if cmp_val != 0:
            raise TestExecError("modification of bucket policy failed ")
    if config.bucket_policy_op == "replace":
        log.info("replacing new bucket policy")
        new_policy_generated = s3_bucket_policy.gen_bucket_policy(
            tenants_list=[tenant1],
            userids_list=[tenant2_user1_info["user_id"]],
            actions_list=["ListBucket"],
            resources=[t1_u1_bucket2.name],
        )
        new_policy = json.dumps(new_policy_generated)
        put_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "put",
            "kwargs": dict(ConfirmRemoveSelfBucketAccess=True, Policy=new_policy),
        })
        log.info("put policy response:%s\n" % put_policy)
        if put_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket policy replacement failed")
        if put_policy is not None:
            response = HttpResponseParser(put_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("new bucket policy created")
            else:
                raise TestExecError("bucket policy creation failed")
        else:
            raise TestExecError("bucket policy creation failed")
    if config.bucket_policy_op == "delete":
        log.info("in delete bucket policy")
        delete_policy = s3lib.resource_op({
            "obj": bucket_policy_obj,
            "resource": "delete",
            "args": None
        })
        if delete_policy is False:
            raise TestExecError(
                "Resource execution failed: bucket policy deletion failed")
        if delete_policy is not None:
            response = HttpResponseParser(delete_policy)
            if response.status_code == 200 or response.status_code == 204:
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy deletion failed")
        else:
            raise TestExecError("bucket policy deletion failed")
        # confirming once again by calling get_bucket_policy
        try:
            rgw_tenant1_user1_c.get_bucket_policy(Bucket=t1_u1_bucket1.name)
            raise TestExecError("bucket policy did not get deleted")
        except boto3exception.ClientError as e:
            log.info(e.response)
            response = HttpResponseParser(e.response)
            if response.error["Code"] == "NoSuchBucketPolicy":
                log.info("bucket policy deleted")
            else:
                raise TestExecError("bucket policy did not get deleted")
        # log.info('get_policy after deletion: %s' % get_policy)

    # check sync status if a multisite cluster
    reusable.check_sync_status()

    # check for any crashes during the execution
    crash_info = reusable.check_for_crash()
    if crash_info:
        raise TestExecError("ceph daemon crash found!")