def setup(self):
        # Resolve the target cluster: env cache first, then explicit param,
        # then the first cluster the backend reports as available.
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')
        else:
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) > 0:
                self.cluster_id = clusters[0]['id']
        # NOTE(review): if no cluster was found above, self.cluster_id may be
        # unset/None here -- confirm callers guarantee at least one cluster.
        self.params['cluster_id'] = self.cluster_id
        self.pool_client = PoolsClient(self.params)

        # Pool selection: env cache first, then param (defaults to 'rbd').
        if 'pool_name' in self.env:
            self.pool_name = self.env['pool_name']
        else:
            self.pool_name = self.params.get('pool_name', 'rbd')
        self.params['pool_name'] = self.pool_name
        if self.pool_name is not None:
            # Map the pool name to its id (last match wins on duplicates).
            resp = self.pool_client.query()
            for i in range(len(resp)):
                if resp[i]['name'] == self.pool_name:
                    self.pool_id = resp[i]['id']
        else:
            self.pool_id = test_utils.create_pool(self.params)
            LOG.info("Created pool that id is %s" % self.pool_id)
        self.params['pool_id'] = self.pool_id

        self.rbd_client = RbdClient(self.params)
        self.iscsi_client = ISCSIClient(self.params)
# Beispiel #2
# 0
def get_available_pool_name_and_id(params):
    """Return ``(name, id)`` of the first pool visible to the REST backend.

    :param params: dict-like test parameters used to build the client
    :return: tuple of (pool_name, pool_id)
    :raises exceptions.TestFail: when the cluster reports no pools
    """
    client = PoolsClient(params)
    pools = client.query()
    if not pools:  # empty result -> nothing to test against
        raise exceptions.TestFail('No pool found!')
    first = pools[0]
    return first['name'], first['id']
# Beispiel #3
# 0
 def __init__(self, params, env):
     """Cache test params/env and derive the fio working directory."""
     self.params = params
     self.env = env
     self.pool_client = PoolsClient(params)
     self.dstpath = '/root'
     self.workload_path = data_dir.COMMON_TEST_DIR
     self.fio_version = self.params.get('fio_version')
     # Strip the '.tar.gz' suffix to get the extracted directory name.
     # NOTE(review): assumes fio_version is set and ends with '.tar.gz'.
     self.fio_working_path = \
         self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
# Beispiel #4
# 0
    def __init__(self, params, env):
        """Store the test context and build the pool/rbd REST clients."""
        self.params = params
        self.env = env
        self.body = {}

        # REST clients used by the test steps.
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)

        # Workload (fio) locations; the working path is resolved later.
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None
# Beispiel #5
# 0
def get_pool_id(env, params):
    """Return a usable pool id, caching it in ``env['pools']``.

    Prefers the cached id; otherwise queries the cluster and, when no pool
    exists yet, creates one inside the (optional) vgroup from the env.

    :param env: dict-like test environment, used as a cache
    :param params: dict-like test parameters
    :return: the pool id
    """
    if 'pools' in env:
        pool_id = env['pools']
    else:
        client = PoolsClient(params)
        pools = client.query()
        if not pools:
            # No pool yet -- create one (vgroup_id may be None).
            vgroup_id = env.get('vgroup_id')
            pool_id = create_pool(params, vgroup_id=vgroup_id)
        else:
            pool_id = pools[0]['id']
        env['pools'] = pool_id
    LOG.info("pool_id is %s" % pool_id)
    return pool_id
# Beispiel #6
# 0
def create_pool(params, flag=False, vgroup_id=None):
    """
    Prepare env for testing, this method is to create pool in the cluster

    :param params: the dict-like parameter
    :param flag: when True return the whole pool dict, otherwise only its id
    :param vgroup_id: vgroup to create the pool in; auto-selected when None
    :return: the created pool dict (flag=True) or its id; implicitly None
             if the pool cannot be found in the query result afterwards
    :raises exceptions.TestFail: when the pool never reaches created state
    """
    pools_client = PoolsClient(params)
    pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    LOG.info("Try to create pool %s" % pool_name)
    if not vgroup_id:
        vgroup_id = get_available_vgroup(params)
    # Replicated pool by default; erasure-coded pool when NO_EC != "true".
    # (local renamed from 'create_pool', which shadowed this function)
    if params.get('NO_EC', "true") == "true":
        pool_body = {
            'name': pool_name,
            'size': params.get('size', 2),
            'group_id': params.get('rest_arg_group_id', 1),
            'pg_num': params.get('rest_arg_pg_num', 64),
            'vgroup_id': vgroup_id
        }
    else:
        pool_body = {
            'name': pool_name,
            'group_id': params.get('rest_arg_group_id', 1),
            'pg_num': params.get('rest_arg_pg_num', 64),
            'vgroup_id': vgroup_id,
            'safe_type': params.get('safe_type', 0),
            'data_block_num': params.get('data_block_num', 2),
            'code_block_num': params.get('code_block_num', 0),
            'min_size': params.get('min_size', 1),
            'max_bytes': params.get('max_bytes', 486547056640),
            'write_mode': params.get("write_mode", "writeback"),
        }
    pools_client.create(**pool_body)
    status = wait_for_pool_created(pools_client, pool_name)
    if not status:
        raise exceptions.TestFail('Failed to create pool %s' % pool_name)
    LOG.info('Create pool %s successfully !' % pool_name)
    for pool in pools_client.query():
        if pool['name'] == pool_name:
            return pool if flag else pool['id']
# Beispiel #7
# 0
 def setup(self):
     """
     Set up before executing test
     """
     LOG.info("Try to create cluster cloudtest_cluster")
     create_cluster = {
         'name': self.params.get('cluster_name', 'cloudtest_cluster'),
         'addr': self.params.get('cluster_addr', 'vm')
     }
     resp = self.clusters_client.create(**create_cluster)
     # NOTE(review): this only fails when resp is falsy AND verify_response
     # passes -- confirm the intended short-circuit (same pattern appears at
     # other call sites in this suite).
     if not resp and utils.verify_response(self.body, resp):
         raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                        self.body)
     self.cluster_id = resp.body.get('id')
     LOG.info("Created cluster successfully!")
     self.params['cluster_id'] = self.cluster_id
     # Build the per-cluster REST clients now that cluster_id is in params.
     self.servers_client = ServersClient(self.params)
     self.group_client = GroupsClient(self.params)
     self.pool_client = PoolsClient(self.params)
     self.rbd_client = RbdClient(self.params)
     self.osd_client = OsdClient(self.params)
# Beispiel #8
# 0
def create_pool_with_replicate_pg(replicate, pg_num, group_id, params):
    """
    Prepare env for testing, this method is to create pool in the cluster

    :param replicate: replica count ('size') for the new pool
    :param pg_num: number of placement groups
    :param group_id: server group to create the pool in
    :param params: the dict-like parameter
    :return: the new pool's id, or implicitly None if it is not visible
             in the query result after creation
    """
    pools_client = PoolsClient(params)
    pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    LOG.info("Try to create pool %s" % pool_name)
    # (local renamed from 'create_pool', which shadowed the module function)
    pool_body = {
        'name': pool_name,
        'size': replicate,
        'group_id': group_id,
        'pg_num': pg_num
    }
    pools_client.create(**pool_body)
    # Fixed wait; no readiness polling here (cf. wait_for_pool_created).
    time.sleep(60)
    for pool in pools_client.query():
        if pool['name'] == pool_name:
            return pool['id']
# Beispiel #9
# 0
    def __init__(self, params, env):
        """Cache test context, build REST clients and init result holders."""
        self.params = params
        self.env = env
        self.cluster_client = ClustersClient(params)
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)
        self.server_client = ServersClient(params)
        # Pool/rbd identity captured before and after the tested operation.
        self.pool_id_before = None
        self.pool_name_before = None
        self.rbd_name_before = None
        self.pool_id_after = None
        self.pool_name_after = None
        self.rbd_name_after = None
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None

        self.target_pool = None
        self.rbd_id = None
        self.server_name = None
        self.server_id = None
class TestISCSIMulpath(test.Test):
    """
    Module for testing ISCSI Multipath related operations.
    """
    def __init__(self, params, env):
        """Cache test context; REST clients are created lazily in setup()."""
        self.params = params
        self.body = {}
        self.env = env
        self.rbd_client = None
        self.iscsi_client = None
        self.pool_client = None

        # Management node address is the host part of the management URL,
        # e.g. 'http://1.2.3.4:port' -> '1.2.3.4'.
        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password')
        self.initiator_ip = self.params.get('ceph_node_ip')
        self.initiator_username = self.params.get('ceph_node_ssh_username')
        self.initiator_password = self.params.get('ceph_node_ssh_password')
        self.target_ip = self.params.get('ceph_node_ip')

        # Marker data written to the multipath mount to validate failover.
        self.dirtypoint = "This is an example to check multipath"
        self.mulpath_mountpoint = "/mnt/multipath"
        self.mulpath_filename = "example.txt"
        # Resources created during the test (released in teardown()).
        self.rbd_name = None
        self.rbd_id = None
        self.iscsi_target_id = None
        self.iscsi_target_name = None
        self.iscsi_target_hostip = []
        self.lun_id = None
        self.pool_name = None
        self.pool_id = None
        self.cluster_id = None

    def setup(self):
        """Resolve cluster/pool ids and build the REST clients for the test."""
        # Cluster: env cache first, then explicit param, then first available.
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')
        else:
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) > 0:
                self.cluster_id = clusters[0]['id']
        self.params['cluster_id'] = self.cluster_id
        self.pool_client = PoolsClient(self.params)

        # Pool: env cache first, then param (defaults to 'rbd').
        if 'pool_name' in self.env:
            self.pool_name = self.env['pool_name']
        else:
            self.pool_name = self.params.get('pool_name', 'rbd')
        self.params['pool_name'] = self.pool_name
        if self.pool_name is not None:
            # Map the pool name to its id (last match wins, as before).
            for pool in self.pool_client.query():
                if pool['name'] == self.pool_name:
                    self.pool_id = pool['id']
        else:
            self.pool_id = test_utils.create_pool(self.params)
            LOG.info("Created pool that id is %s" % self.pool_id)
        self.params['pool_id'] = self.pool_id

        self.rbd_client = RbdClient(self.params)
        self.iscsi_client = ISCSIClient(self.params)

    def _create_iscsi_target(self):
        """Create a multipath iSCSI target and record its portal IPs.

        Returns the new target's id.
        """
        self.iscsi_target_name = "cloudtest" + \
                                 utils.utils_misc.generate_random_string(6)
        body = {
            'initiator_ips': self.initiator_ip,
            'target_name': self.iscsi_target_name,
            'multipath': self.params.get('multipath', '3')
        }
        resp = self.iscsi_client.create(**body)
        if not resp and utils.verify_response(body, resp):
            raise exceptions.TestFail("Create target failed: %s" % body)
        # NOTE(review): portal IPs are read via resp['host_ip'] but the id
        # via resp.body -- confirm the response object supports both.
        self.iscsi_target_hostip = resp['host_ip'].split(',')

        return resp.body['target_id']

    def _create_iscsi_lun(self, target_id, rbd_id):
        """Attach *rbd_id* (in the current pool) to *target_id* as a LUN.

        Returns the new LUN's id.
        """
        lun_spec = dict(target_id=target_id,
                        pool_id=self.pool_id,
                        rbd_id=rbd_id)
        response = self.iscsi_client.add_lun(**lun_spec)
        return response.body['lun_id']

    def _delete_iscsi_lun(self, target_id, lun_id):
        """Detach LUN *lun_id* from target *target_id*."""
        self.iscsi_client.delete_lun(target_id=target_id, lun_id=lun_id)

    def _delete_target(self, target_id):
        """
        Test that deletion of delete target
        """
        self.iscsi_client.delete_iscsitarget(target_id)
        # Deletion is verified by the target no longer appearing in a query.
        for target in self.iscsi_client.query():
            if target['target_id'] == target_id:
                raise exceptions.TestFail("Delete target failed")

    def _delete_rbd(self, pool_id, rbd_id):
        """
        Test that deletion of specified rdb

        :param pool_id: pool the rbd lives in
        :param rbd_id: the rbd to delete
        :raises exceptions.TestFail: when the delete response is empty
        """
        # Delete the rbd from the pool the caller specified.  (The original
        # ignored the pool_id argument and always used self.pool_id; the only
        # visible caller passes self.pool_id, so behavior is unchanged there.)
        resp = self.rbd_client.delete_rbd(pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Delete rbd failed")

    def get_rbd_id(self, pool_id, rbd_name):
        """Return the id of the rbd named *rbd_name* in pool *pool_id*.

        :return: the rbd id, or None when no rbd with that name exists
        :raises exceptions.TestFail: when the pool query returns nothing
        """
        resp = self.rbd_client.query(pool_id)
        if not resp:
            raise exceptions.TestFail("No specified rbd found in the pool")
        for rbd in resp:
            if rbd['name'] == rbd_name:
                return rbd['id']
        return None

    def get_rbd_name(self, pool_id, rbd_id):
        """Return the name of the rbd with id *rbd_id* in pool *pool_id*.

        :return: the rbd name, or None when no rbd with that id exists
        :raises exceptions.TestFail: when the pool query returns nothing
        """
        resp = self.rbd_client.query(pool_id)
        if not resp:
            raise exceptions.TestFail("No specified rbd found in the pool")
        for rbd in resp:
            if rbd['id'] == rbd_id:
                return rbd['name']
        return None

    def hit_target(self, control_server_ip, control_username, control_password,
                   initiator_ip, initiator_username, initiator_password):
        """Run iscsiadm discovery against every portal IP of the target.

        :raises exceptions.TestFail: when the target name is not advertised
            on one of the portals
        """
        for host_ip in self.iscsi_target_hostip:
            cmd = ('iscsiadm -m discovery -t st -p %s; ' % host_ip)
            find, buff = utils.sshclient_execmd(control_server_ip,
                                                control_username,
                                                control_password, initiator_ip,
                                                initiator_username,
                                                initiator_password, cmd)
            if buff.find(self.iscsi_target_name) == -1:
                raise exceptions.TestFail("No specified target found for %s" %
                                          host_ip)

    def do_iscsi_login(self, control_server_ip, control_username,
                       control_password, initiator_ip, initiator_username,
                       initiator_password, target_ip):
        """Log the initiator in to this test's target at *target_ip*."""
        cmd = ('iscsiadm -m node -T %s -p %s --login; ' %
               (self.iscsi_target_name, target_ip))
        utils.sshclient_execmd(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password, cmd)

    def do_iscsi_logout(self, control_server_ip, control_username,
                        control_password, initiator_ip, initiator_username,
                        initiator_password, target_ip):
        """Log the initiator out of this test's target at *target_ip*."""
        cmd = ('iscsiadm -m node -T %s -p %s --logout; ' %
               (self.iscsi_target_name, target_ip))
        utils.sshclient_execmd(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password, cmd)

    def get_iscsi_count(self, control_server_ip, control_username,
                        control_password, initiator_ip, initiator_username,
                        initiator_password):
        """Return the integer printed by ``lsblk -S | wc -l`` on the
        initiator node (used as a proxy for the number of SCSI devices).
        """
        cmd = ('lsblk -S | wc -l; ')
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        # int() replaces string.atoi(), which was removed in Python 3.
        # _lines[1] is presumably the wc output line -- TODO confirm the
        # first line is the echoed command.
        return int(_lines[1], 10)

    def get_iscsi_multipath(self, control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password):
        """Run ``multipath -l`` on the initiator; return (status, output)."""
        cmd = 'multipath -l; '
        result = utils.sshclient_execmd(control_server_ip,
                                        control_username, control_password,
                                        initiator_ip, initiator_username,
                                        initiator_password, cmd)
        find, buff = result
        return find, buff

    def get_chars(self, str):
        """Return *str* with everything except ASCII letters, '/' and ' '
        removed (scrubs terminal noise from remote command output)."""
        def _keep(ch):
            # Same predicate as before: ASCII letters, slash, or space.
            return ('a' <= ch <= 'z' or 'A' <= ch <= 'Z'
                    or ch == '/' or ch == ' ')
        return "".join(ch for ch in str if _keep(ch))

    def make_iscsi_dirty(self, control_server_ip, control_username,
                         control_password, initiator_ip, initiator_username,
                         initiator_password):
        """Format the multipath mapper device, mount it and write a marker
        file whose content is later verified by check_iscsi_dirty().
        """
        # Find the multipath mapper device created for our LUN.
        cmd = 'ls --color=never /dev/mapper/mpath*'
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        if len(_lines) < 2:
            raise exceptions.TestFail("Did not get any mapper device")
        mapper_device = self.get_chars(_lines[1])
        if len(mapper_device) == 0:
            raise exceptions.TestFail("Did not get a valid mapper device name")

        # Create the mount point before mounting.  (Bug fix: the original
        # ran 'mkdir' on the mapper device path instead, so the mount point
        # was never created.)
        cmd = 'mkdir -p %s' % (self.mulpath_mountpoint)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = 'mkfs.ext4 %s' % mapper_device
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = 'mount %s %s' % (mapper_device, self.mulpath_mountpoint)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        # Write the marker file and read it back.
        cmd = 'echo "%s" > %s/%s' % (self.dirtypoint, self.mulpath_mountpoint,
                                     self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = 'cat %s/%s' % (self.mulpath_mountpoint, self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

    def start_iscsi_tgt(self, control_server_ip, control_username,
                        control_password, initiator_ip, initiator_username,
                        initiator_password):
        """Start the tgt daemon on the initiator node."""
        utils.sshclient_execmd(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password,
                               'service tgtd start')

    def stop_iscsi_tgt(self, control_server_ip, control_username,
                       control_password, initiator_ip, initiator_username,
                       initiator_password):
        """Stop the tgt daemon on the initiator node (simulates path loss)."""
        utils.sshclient_execmd(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password,
                               'service tgtd stop')

    def check_iscsi_dirty(self, control_server_ip, control_username,
                          control_password, initiator_ip, initiator_username,
                          initiator_password):
        """Verify the marker file written by make_iscsi_dirty() survived,
        then remove it and verify the removal.

        :raises exceptions.TestFail: when the marker content is wrong or
            the file cannot be removed
        """
        cmd = 'cat %s/%s' % (self.mulpath_mountpoint, self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        if len(_lines) < 2:
            raise exceptions.TestFail("Did not get info for validation")
        # get_chars() keeps letters/slash/space only; dirtypoint contains
        # only those, so a clean round-trip compares equal.
        info_val = self.get_chars(_lines[1])
        if self.dirtypoint == info_val:
            LOG.info("Find %s under %s!" %
                     (self.mulpath_filename, self.mulpath_mountpoint))
        else:
            raise exceptions.TestFail(
                "%s not found under %s" %
                (self.mulpath_filename, self.mulpath_mountpoint))

        cmd = 'rm -rf %s/%s' % (self.mulpath_mountpoint, self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        # Echoes 'removed' only when the file no longer exists.
        cmd = '[ -f %s/%s ] || echo removed' % (self.mulpath_mountpoint,
                                                self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        info_val = self.get_chars(_lines[1])
        if info_val == "removed":
            LOG.info("Removed %s successfully!" % self.mulpath_filename)
        else:
            raise exceptions.TestFail("Removed %s fault!" %
                                      self.mulpath_filename)

    def clean_iscsi_dirty(self, control_server_ip, control_username,
                          control_password, initiator_ip, initiator_username,
                          initiator_password):
        """Unmount the multipath mount point created by make_iscsi_dirty()."""
        umount_cmd = 'umount %s' % self.mulpath_mountpoint
        utils.sshclient_execmd(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password,
                               umount_cmd)

    def iscsi_actions(self, control_server_ip, control_username,
                      control_password, initiator_ip, initiator_username,
                      initiator_password, target_ip):
        """End-to-end multipath scenario: install initiator tooling,
        configure multipathd, log in to two portals, write a marker file,
        stop one tgt daemon, verify the data survives via the other path,
        then restore and log out.
        """
        cmd = 'yum -y install iscsi-initiator-utils ; '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        if not (find):
            raise exceptions.TestFail("Install iscsi-initiator-utils fault")

        cmd = 'yum -y install device-mapper-multipath ; '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        if not (find):
            raise exceptions.TestFail("Install device-mapper-multipath fault")

        # Minimal multipath.conf: friendly names, failover path grouping,
        # non-SCSI device nodes blacklisted.
        multipathconf="""defaults{\n    user_friendly_names yes\n""" \
        """    polling_interval 10\n    checker_timeout 120\n    """ \
        """queue_without_daemon no\n}\nblacklist {\n""" \
        """    devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"\n""" \
        """    devnode "^hd[a-z]"\n}\ndevices {\n    device{\n        """ \
        """path_grouping_policy failover\n    }\n}"""
        cmd = 'echo \'%s\' > /etc/multipath.conf' % multipathconf
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

        cmd = 'systemctl start multipathd '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        if not find:
            raise exceptions.TestFail("Start multipath service fault")

        # All portals must advertise the target before we log in.
        self.hit_target(control_server_ip, control_username, control_password,
                        initiator_ip, initiator_username, initiator_password)

        iscsi_count1 = self.get_iscsi_count(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password)
        #Login iscsi
        self.do_iscsi_login(control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password, self.iscsi_target_hostip[0])
        time.sleep(1)
        iscsi_count2 = self.get_iscsi_count(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password)
        #Check lsblk
        # Each successful login must add at least one SCSI device.
        if iscsi_count2 <= iscsi_count1:
            raise exceptions.TestFail("Login target to be first iscsi fault")

        self.do_iscsi_login(control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password, self.iscsi_target_hostip[1])
        time.sleep(1)
        iscsi_count3 = self.get_iscsi_count(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password)
        #Check lsblk
        if iscsi_count3 <= iscsi_count2:
            raise exceptions.TestFail("Login target to be second iscsi fault")

        #Get Multipath
        find, buff = self.get_iscsi_multipath(control_server_ip,
                                              control_username,
                                              control_password, initiator_ip,
                                              initiator_username,
                                              initiator_password)
        #Check Multipath
        # NOTE(review): the multipath output is fetched but never asserted
        # on -- confirm whether a content check was intended here.

        #make iscsi dirty
        self.make_iscsi_dirty(control_server_ip, control_username,
                              control_password, initiator_ip,
                              initiator_username, initiator_password)
        time.sleep(1)
        #Stop one tgt
        self.stop_iscsi_tgt(control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password)
        time.sleep(1)

        #Check iscsi dirty
        # The marker file must still be readable through the surviving path.
        self.check_iscsi_dirty(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password)
        time.sleep(1)

        #Start one tgt
        self.start_iscsi_tgt(control_server_ip, control_username,
                             control_password, initiator_ip,
                             initiator_username, initiator_password)
        time.sleep(1)

        #Clean iscsi dirty
        self.clean_iscsi_dirty(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password)

        #Logout iscsi
        self.do_iscsi_logout(control_server_ip, control_username,
                             control_password, initiator_ip,
                             initiator_username, initiator_password,
                             self.iscsi_target_hostip[1])
        time.sleep(1)
        self.do_iscsi_logout(control_server_ip, control_username,
                             control_password, initiator_ip,
                             initiator_username, initiator_password,
                             self.iscsi_target_hostip[0])
        time.sleep(1)

    def test(self):
        """Create an rbd, expose it via a multipath iSCSI target and run the
        multipath failover scenario against it.
        """
        # Create rbd in the pool
        self.rbd_id = test_utils.create_rbd(self.pool_id, self.params)
        if self.rbd_id is None:  # 'is None', not '== None'
            raise exceptions.TestFail("rbd is not existed")
        LOG.info("RBD id is %d" % self.rbd_id)
        # Create iscsi target; it must expose more than two portal IPs.
        self.iscsi_target_id = self._create_iscsi_target()
        time.sleep(1)
        target_multipath = len(self.iscsi_target_hostip)
        if target_multipath <= 2:
            raise exceptions.TestFail("Multipath is %d" % target_multipath)
        # Bind iscsi to rbd
        self.lun_id = self._create_iscsi_lun(self.iscsi_target_id,
                                             self.rbd_id)
        time.sleep(1)
        self.iscsi_actions(self.control_server_ip, self.control_username,
                           self.control_password, self.initiator_ip,
                           self.initiator_username,
                           self.initiator_password, self.target_ip)

    def teardown(self):
        """Release resources in reverse creation order: LUN, then target,
        then the rbd image (order matters; each step only runs if the
        resource was actually created)."""
        if self.lun_id is not None:
            self._delete_iscsi_lun(self.iscsi_target_id, self.lun_id)
        if self.iscsi_target_id is not None:
            self._delete_target(self.iscsi_target_id)
        if self.rbd_id is not None:
            self._delete_rbd(self.pool_id, self.rbd_id)
    def setup(self):
        """
        Set up before executing test
        1. to check if two clusters are available
        2. create one pool: testpool
        3. configure remote backup in the testpool
        """
        # NOTE(review): this second setup() definition shadows the earlier
        # one in this class, and it references helpers (_create_cluster,
        # create_servers_body, _add_three_hosts, _deploy_cluster) that are
        # not defined here -- it looks like it belongs to a different test
        # class; confirm before relying on it.
        # to check if two cluster are available
        clusters = test_utils.get_available_clusters(self.params)
        if len(clusters) < 1:
            raise exceptions.TestSetupFail(
                'There are not enough clusters!')
        elif len(clusters) < 2:
            LOG.info('There are not enough clusters, try to create cluster!')
            self.cluster_id = self._create_cluster()
            self.params['cluster_id'] = self.cluster_id
            self.servers_client = ServersClient(self.params)
            # Copy cluster2-specific REST args into the server-create body.
            for k, v in self.params.items():
                if 'rest_arg_cluster2_' in k:
                    new_key = k.split('rest_arg_cluster2_')[1]
                    self.create_servers_body[new_key] = v
            self._add_three_hosts()
            self._deploy_cluster()
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) < 2:
                raise exceptions.TestSetupFail(
                    'There are not enough clusters!')

        # Use the second cluster as source; any other one is the destination.
        self.cluster_id = clusters[1]['id']
        self.params['cluster_id'] = self.cluster_id
        for cluster in clusters:
            if cluster['id'] != self.cluster_id:
                self.des_cluster_id = cluster['id']
                self.body['des_cluster_id'] = self.des_cluster_id
                break
        src_host = test_utils.get_available_server_info(self.params,
                                                        self.cluster_id)
        self.src_ip = src_host['publicip']
        self.body['src_ip'] = self.src_ip
        self.src_host_id = src_host['id']
        self.body['src_host_id'] = self.src_host_id
        des_host = test_utils.get_available_server_info(self.params,
                                                        self.des_cluster_id)
        self.des_ip = des_host['publicip']
        self.body['des_ip'] = self.des_ip
        self.des_host_id = des_host['id']
        self.body['des_host_id'] = self.des_host_id

        # Reuse an explicit pool when given; otherwise create a fresh one
        # and wait until it is ready.
        if self.params.get('pool_id'):
            self.pool_id = self.params.get('pool_id')
        else:
            self.pool_id = test_utils.create_pool(self.params)
            pool_client = PoolsClient(self.params)
            if not test_utils.wait_for_pool_in_state(self.pool_id, pool_client,
                                                     'ready'):
                raise exceptions.TestSetupFail("Failed to creating test pool!")
        self.params['pool_id'] = self.pool_id

        # configure remote backup in testpool
        LOG.info("Try to configure remote backup in pool %s : %s"
                 % (self.pool_id, self.body))
        self.client = RemoteBackupClient(self.params)
        self.client.configure_rbpolicy(**self.body)

        # other pre-conditions
        # Management node address is the host part of the management URL.
        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password',
                                                'lenovo')
        self.initiator_ip = self.params.get('initiator_ip', self.src_ip)
        self.initiator_username = self.params.get('ceph_node_ssh_username',
                                                  'root')
        self.initiator_password = self.params.get('ceph_node_ssh_password',
                                                  'lenovo')
        # create iscsi client
        self.iscsi_client = ISCSIClient(self.params)
# Beispiel #12
# 0
class TestPool(test.Test):
    """
    Scenario for testing pool related operations:
    create/update/query a pool, write an rbd image in it with fio and
    verify the written data is visible through the rbd usedsize.
    """
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)

        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None
        # Pre-initialize resource handles so teardown() can safely check
        # them even when setup()/test_edit_pool() failed before creating
        # the pool or the rbd (previously teardown raised AttributeError).
        self.pool_id = None
        self.pool_name = None
        self.rbd_id = None
        self.rbd_name = None

    def setup(self):
        """Resolve the target cluster and collect ssh/fio parameters."""
        # Cluster selection order: runtime env, explicit parameter, then
        # any available cluster (same fallback the other setups use).
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')
        else:
            clusters = test_utils.get_available_clusters(self.params)
            if not clusters:
                raise exceptions.TestSetupFail("No available cluster found!")
            self.cluster_id = clusters[0]['id']

        ceph_server_ip = self.params.get('ceph_management_url')
        # ceph_management_url looks like 'http://<ip>:<port>/...'; take the
        # '//<ip>' part and strip the slashes.
        self.mid_host_ip = ceph_server_ip.split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        # Attribute was previously misspelled 'end_host_passwprd'; it is
        # only used inside this class, so the rename is safe.
        self.end_host_password = self.params.get('ceph_node_ssh_password')

        # fio job arguments consumed by _write_rbd().
        self.ioengine = self.params.get('ioengine', 'rbd')
        self.clientname = self.params.get('clientname', 'admin')
        self.rw = self.params.get('rw', 'write')
        self.bs = self.params.get('bs', '4k')
        self.iodepth = self.params.get('iodepth', 1024)
        self.numjobs = self.params.get('numjobs', 2)
        self.direct = self.params.get('direct', 1)
        self.size = self.params.get('size', '1M')

        self.end_host_ip = test_utils.get_available_host_ip(self.params)

    def _query_pool(self, pool_id, group_id, size, pg_num):
        """
        Query all pools and verify the attributes of pool `pool_id`.

        Returns the pool name on success; raises TestFail when the pool
        cannot be found or an attribute does not match.
        NOTE: `size` is accepted for symmetry with _update_pool but is not
        verified against the response (the REST payload seen here exposes
        group_id/pg_num only) -- confirm whether a size check is wanted.
        """
        resp = self.pool_client.query()
        LOG.info("Got all pool %s" % resp)
        if not len(resp) > 0:
            raise exceptions.TestFail("Query pools failed")
        for pool in resp:
            if pool['id'] == pool_id:
                if pool['group_id'] != group_id:
                    raise exceptions.TestFail("Group id is not expected for "
                                              "pool%s" % pool_id)
                elif pool['pg_num'] != pg_num:
                    raise exceptions.TestFail("Pg_num is not expected for "
                                              "pool%s" % pool_id)
                else:
                    return pool['name']
        # Previously this fell through and returned None, which made the
        # later fio run use '-pool=None'; fail loudly instead.
        raise exceptions.TestFail("Pool %s not found in query result"
                                  % pool_id)

    def _update_pool(self, pool_id, size, group_id, pg_num):
        """
        Update pool `pool_id` with a new random name and the given
        size/group/pg_num, then wait for the change to settle.
        """
        pool_name = 'cloudtest_' + utils.utils_misc.generate_random_string(6)
        vgroup_id = test_utils.get_available_vgroup(self.params)

        # Replicated pool vs. erasure-coded pool request bodies differ.
        if self.params.get('NO_EC', "true") == "true":
            update_pool = {
                'name': pool_name,
                'size': size,
                'group_id': group_id,
                'pg_num': pg_num,
                'vgroup_id': vgroup_id
            }
        else:
            update_pool = {
                'name': pool_name,
                'group_id': self.params.get('rest_arg_group_id', 1),
                'pg_num': self.params.get('rest_arg_pg_num', 80),
                'vgroup_id': vgroup_id,
                'safe_type': self.params.get('safe_type', 10),
                'data_block_num': self.params.get('data_block_num', 3),
                'code_block_num': self.params.get('code_block_num', 0),
                'min_size': self.params.get('min_size', 1),
                'max_bytes': self.params.get("max_bytes", 1073741824)
            }
        resp = self.pool_client.update(pool_id, **update_pool)
        LOG.info('Rest Response: %s' % resp)
        # NOTE(review): file-wide idiom; the conjunction looks like it was
        # meant to be 'not resp or not utils.verify_response(...)'.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Update pool failed: %s" % self.body)

        # Give the backend time to rebalance; acting too early raises
        # "the pool is not ready" errors.
        time.sleep(240)

    def _check_specified_rbd(self, pool_id, rbd_id):
        """Query a specific rbd in a pool and return its name."""
        resp = self.rbd_client.query(pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("No specified rbd found in the pool")
        return resp['name']

    def _write_rbd(self, pool_name, rbd_name, flag=False):
        """
        Run fio against `rbd_name` in `pool_name` on the end host.

        :param flag: when True, extract the fio tarball first (needed on
                     the first run on a fresh host).
        """
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=%s -clientname=%s ' % (self.ioengine,
                                                       self.clientname)
        cmd3 = '-pool=%s -rw=%s -bs=%s -iodepth=%s -numjobs=%s -direct=%s ' % \
               (pool_name, self.rw, self.bs, self.iodepth,
                self.numjobs, self.direct)
        cmd4 = '-size=%s -group_reporting -rbdname=%s -name=mytest' % \
               (self.size, rbd_name)
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd

        LOG.info("===cmd is %s" % cmd)
        # Command is executed on the end host, hopping through the
        # management (mid) host.
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def _check_rbd_write(self, pool_id, rbd_name, start, offset):
        """Fail the test unless the rbd usedsize confirms the write."""
        status = self._wait_for_write_rbd(pool_id, rbd_name, start, offset)
        if not status:
            raise exceptions.TestFail('Failed to write rbd %s' % rbd_name)
        LOG.info('Write rbd %s successfully !' % rbd_name)

    def _wait_for_write_rbd(self,
                            pool_id,
                            rbd_name,
                            start,
                            offset,
                            timeout=300):
        """
        Poll the rbd list until `rbd_name` reports the expected usedsize.

        NOTE(review): accepting usedsize == 0 as well as == offset looks
        intentional (thin provisioning may report 0) -- confirm.
        """
        def is_rbd_create():
            resp = self.rbd_client.query(pool_id)
            LOG.info("Check used size %s" % resp)
            for rbd in resp:
                if rbd['name'] == rbd_name \
                        and rbd['usedsize'] in (0, offset):
                    LOG.info("usedsize is %s" % rbd['usedsize'])
                    LOG.info("start is %s" % start)
                    LOG.info("offset is %s" % offset)
                    return True
            return False

        return utils.utils_misc.wait_for(is_rbd_create,
                                         timeout,
                                         first=0,
                                         step=5,
                                         text='Waiting for rbd %s write.' %
                                         rbd_name)

    def test_edit_pool(self):
        """
        Create a pool and a 1M rbd, write it via fio, then update the
        pool's size/pg_num and group and verify writes still succeed.
        """
        group_id = 1
        # Creating 1M rbd
        RBD_CAPACITY = 1024 * 1024

        self.pool_response = test_utils.create_pool(self.params, flag=True)
        self.pool_name = self.pool_response.get('name')
        self.pool_id = self.pool_response.get('id')
        self.rbd_response = test_utils.create_rbd_with_capacity(
            self.pool_id, self.params, RBD_CAPACITY)
        self.rbd_id = self.rbd_response.get('id')
        self.rbd_name = self.rbd_response.get('name')

        # fio tarball name minus '.tar.gz' is the directory it unpacks to.
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)
        self._write_rbd(self.pool_name, self.rbd_name, flag=True)
        self._check_rbd_write(self.pool_id, self.rbd_name, 0, 0)

        # Update the size and pg_num of the pool, then re-verify writes.
        replicate = 2
        pg_num = 80
        self._update_pool(self.pool_id, replicate, group_id, pg_num)
        self.pool_name = \
            self._query_pool(self.pool_id, group_id, replicate, pg_num)

        self._write_rbd(self.pool_name, self.rbd_name, flag=True)
        self._check_rbd_write(self.pool_id, self.rbd_name, 0, 1024 * 1024)

        # Update the group of the pool, then re-verify writes.
        group_id = 1
        self._update_pool(self.pool_id, replicate, group_id, pg_num)
        self.pool_name = \
            self._query_pool(self.pool_id, group_id, replicate, pg_num)

        self._write_rbd(self.pool_name, self.rbd_name, flag=True)
        self._check_rbd_write(self.pool_id, self.rbd_name, 0, 1024 * 1024)

    def teardown(self):
        """Best-effort cleanup: remove fio files and delete the test rbd."""
        if self.fio_working_path is not None:
            # Remove the fio tarball from the mid host and both the
            # tarball and the unpacked directory from the end host.
            cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath,
                                                  self.fio_version))
            cmd1 = 'pkill fio || true; '
            cmd2 = 'rm -rf %s %s' % (
                os.path.join(self.dstpath, self.fio_version),
                os.path.join(self.dstpath, self.fio_working_path))
            cmd = cmd1 + cmd2
            remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd=cmd,
                cmd_mid=cmd_mid)
        # Let the cluster settle before deleting the rbd.
        time.sleep(240)
        if self.rbd_id is not None:
            try:
                test_utils.delete_rbd(self.pool_id, self.rbd_id, self.params)
            except exceptions.UnexpectedResponseCode as e:
                # Cleanup is best-effort, but log instead of hiding the
                # failure completely ('except X, e' was py2-only syntax).
                LOG.warn('Ignored error deleting rbd %s: %s'
                         % (self.rbd_id, e))
Beispiel #13
0
 def __init__(self, params, env):
     """Cache the test context and build the pools REST client."""
     self.env = env
     self.params = params
     self.body = {}
     # REST client for pool operations
     self.client = PoolsClient(self.params)
Beispiel #14
0
 def __init__(self, params, env):
     """Cache the test context and build the REST clients used below."""
     self.env = env
     self.params = params
     # REST clients for pool, rbd and snapshot operations
     self.pool_client = PoolsClient(self.params)
     self.rbd_client = RbdClient(self.params)
     self.snapshot_client = SnapshotsClient(self.params)
Beispiel #15
0
class TestGroup(test.Test):
    """
    Test group can separate the data io from customized domain:
    build two groups (host level and rack level), deploy a cluster,
    run fio against a pool in each group and verify the io stays
    inside the group it was issued to.
    """
    def __init__(self, params, env):
        self.params = params
        self.clusters_client = ClustersClient(params)
        self.body = {}
        self.env = env
        self.cluster_id = None
        # Resources of the host-level group; populated during test().
        self.host_group_name = 'host_group_' \
                               + utils_misc.generate_random_string(6)
        self.host_group_id = None
        self.host_group_pool_id = None
        self.host_group_pool_name = None
        self.host_group_rbd_id = None
        self.host_group_rbd_name = None
        self.host_group_servers_id = []
        # Resources of the rack-level group; populated during test().
        self.rack_group_name = 'rack_group_' \
                               + utils_misc.generate_random_string(6)
        self.rack_group_id = None
        self.rack_group_pool_id = None
        self.rack_group_pool_name = None
        self.rack_group_rbd_id = None
        self.rack_group_rbd_name = None
        self.rack_group_servers_id = []
        # fio workload staging: tarball is copied from the local
        # workload dir to the mid host, then to the end host.
        self.dstpath = '/root'
        self.workload_path = data_dir.CEPH_API_SCENARIOS_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        # Tarball name minus '.tar.gz' is the directory fio unpacks to.
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        # ceph_management_url is 'http://<ip>:<port>/...'; extract <ip>.
        self.mid_host_ip = \
            self.params.get('ceph_management_url').split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')
        # fio job parameters
        self.rw = self.params.get('rw', 'randrw')
        self.bs = self.params.get('bs', '8k')
        self.iodepth = self.params.get('iodepth', 128)
        self.runtime = self.params.get('runtime', 120)
        self.rwmixread = self.params.get('rwmixread', 70)
        self.end_host_ip = None

    def setup(self):
        """
        Set up before executing test:
        create the test cluster and build the REST clients bound to it.
        """
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')
        LOG.info("Created cluster successfully!")
        # Propagate the cluster id so the clients below address it.
        self.params['cluster_id'] = self.cluster_id
        self.servers_client = ServersClient(self.params)
        self.group_client = GroupsClient(self.params)
        self.pool_client = PoolsClient(self.params)
        self.rbd_client = RbdClient(self.params)
        self.osd_client = OsdClient(self.params)

    def _copy_fio_package_to_host(self):
        """Stage the fio tarball: local -> mid host -> end host."""
        self.end_host_ip = test_utils.get_available_host_ip(self.params)
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

    def _write_rbd(self, pool_name, rbd_name, flag=False):
        """
        Run fio against `rbd_name` in `pool_name` on the end host.

        :param flag: when True, extract the fio tarball first (needed
                     on the first run on a fresh host).
        """
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin -pool=%s ' % \
               pool_name
        cmd3 = '-rw=%s -rwmixread=%s -bs=%s -iodepth=%s -numjobs=1 -direct=1 ' % \
               (self.rw, self.rwmixread, self.bs, self.iodepth)
        cmd4 = '-runtime=%s -group_reporting -rbdname=%s -name=mytest' % \
               (self.runtime, rbd_name)
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        LOG.info("cmd = %s" % cmd)

        # Executed on the end host, hopping through the mid host.
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def _create_group(self, name, leaf_firstn):
        """Create a group policy and return its id."""
        group_body = {'name': name, 'max_size': 10, 'leaf_firstn': leaf_firstn}
        resp_body = self.group_client.create_group(**group_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create group policy failed")
        LOG.info("Created group '%s' with id: %s" % (body['name'], body['id']))
        return body['id']

    def _create_bucket(self, group_id):
        """Create a rack-type bucket under `group_id` and return its id."""
        create_body = {
            'name': 'cloudtest_bucket_' + utils_misc.generate_random_string(6),
            'type': 'rack'
        }
        resp_body = self.group_client.create_bucket(group_id, **create_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create bucket failed")
        LOG.info("Created bucket '%s' with id: %s" %
                 (body['name'], body['id']))
        return body['id']

    def _create_server(self, request_body):
        """Add one server and wait until it reaches the 'added' state."""
        if not request_body.get('parent_bucket'):
            # No bucket given: fall back to any available group/bucket.
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 600)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s" %
                                      request_body['servername'])
        LOG.info('Create server %s successfully!' %
                 body['properties'].get('name'))

    def _add_three_hosts(self, kwargs):
        """
        Add the servers described by params whose keys start with the
        `kwargs` prefix (e.g. 'host_group_servername_1'), one thread
        per server, and wait for all of them.
        """
        body = {}
        for k, v in self.params.items():
            if kwargs in k:
                # Strip the group prefix: 'host_group_publicip_1' -> 'publicip_1'
                new_key = k.split(kwargs)[1]
                body[new_key] = v
        LOG.info("body = %s" % body)
        i = 1
        threads = []
        while body.get('servername_%d' % i):
            tmp = 'servername_%d' % i
            servername = body.get(tmp, 'cloudtest_server_%d' % i)
            tmp = 'username_%d' % i
            username = body.get(tmp, 'root')
            tmp = 'password_%d' % i
            password = body.get(tmp, 'lenovo')
            tmp = 'publicip_%d' % i
            publicip = body.get(tmp)
            tmp = 'clusterip_%d' % i
            clusterip = body.get(tmp)
            tmp = 'parent_bucket_%d' % i
            parent_bucket = body.get(tmp)
            create_server_body = {
                'servername': servername,
                'username': username,
                'passwd': password,
                'publicip': publicip,
                'clusterip': clusterip,
                'parent_bucket': parent_bucket
            }
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            i = i + 1

        # waiting for all servers ready
        # NOTE(review): daemon threads + join(600) means a server that is
        # still being added after the timeout is silently abandoned.
        for t in threads:
            t.setDaemon(True)
            t.start()

        for i in range(0, len(threads)):
            try:
                threads[i].join(600)
            except Exception as details:
                LOG.exception(
                    'Caught exception waiting for server %d added : %s' %
                    (i, details))

    def _deploy_cluster(self):
        """Deploy the cluster and wait for the 'deployed' state."""
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(
            self.cluster_id, self.clusters_client, 'deployed',
            int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _create_pool(self, group_id):
        """Create a pool in `group_id`; return (pool_id, pool_name)."""
        pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        LOG.info("Try to create pool %s" % pool_name)
        create_pool = {
            'name': pool_name,
            'size': self.params.get('pool_size', 3),
            'group_id': group_id,
            'pg_num': self.params.get('pg_num', 128)
        }
        resp = self.pool_client.create(**create_pool)
        status = self._wait_for_pool_create(pool_name)
        if not status:
            raise exceptions.TestFail('Failed to create pool %s' % pool_name)
        LOG.info('Create pool %s successfully !' % pool_name)
        pool_id = resp.body['properties']['context']['pool_id']
        return pool_id, pool_name

    def _wait_for_pool_create(self, pool_name, timeout=1000):
        """
        Poll until `pool_name` shows up ready.
        NOTE(review): hard-codes size == 3 and pg_num == 128, so it only
        matches the defaults used in _create_pool -- confirm.
        """
        def is_pool_create():
            resp = self.pool_client.query()
            for i in range(len(resp)):
                if resp[i]['name'] == pool_name \
                        and resp[i]['state'] == 1 \
                        and resp[i]['size'] == 3 \
                        and resp[i]['pg_num'] == 128:
                    return True
            return False

        return utils_misc.wait_for(is_pool_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for pool %s create.' %
                                   pool_name)

    def _create_rbd(self, pool_id, rbd_name):
        """Create rbd `rbd_name` in `pool_id`, wait ready, return its id."""
        LOG.info("Try to create rbd %s" % rbd_name)
        create_rbd = {
            'name': rbd_name,
            'object_size': self.params.get('object_size', 10),
            'capacity': self.params.get('capacity', 1024 * 1024 * 1024)
        }
        self.rbd_client.create(pool_id, **create_rbd)
        status = self._wait_for_rbd_in_status(pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to create rbd %s!' % rbd_name)
        resp = self.rbd_client.query(pool_id)
        for i in range(len(resp)):
            if resp[i]['name'] == rbd_name:
                return resp[i]['id']
        raise exceptions.TestError('Create rbd %s failed' % rbd_name)

    def _wait_for_rbd_in_status(self, pool_id, rbd_name, status, timeout=300):
        """Poll until `rbd_name` in `pool_id` reaches `status`."""
        # Numeric status codes used by the rbd REST API.
        status_map = {'copying': 6, 'ready': 0}

        def is_rbd_create():
            resp = self.rbd_client.query(pool_id)
            for i in range(len(resp)):
                if resp[i]['name'] == rbd_name:
                    if resp[i]['status'] == status_map[status]:
                        return True
            return False

        return utils_misc.wait_for(is_rbd_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for rbd %s create.' %
                                   rbd_name)

    def _migrate_rbd(self, src_pool_id, des_pool_id, rbd_id, rbd_name):
        """Migrate `rbd_id` from `src_pool_id` to `des_pool_id`."""
        LOG.info("Try to migrate rbd %s" % rbd_name)
        move_rbd = {'target_pool': des_pool_id}
        resp = self.rbd_client.migrate(src_pool_id, rbd_id, **move_rbd)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
        status = self._wait_for_rbd_in_status(des_pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to migrate rbd %s!' % rbd_name)
        LOG.info('Migrate rbd %s successfully !' % rbd_name)

    def _get_servers_id(self):
        """Split all servers into host-group and rack-group id lists."""
        query_server = {'marker': 0, 'pagesize': 100}
        servers = self.servers_client.query(**query_server)
        if not len(servers) > 0:
            raise exceptions.TestFail("No available server found!")
        for server in servers:
            # The REST API returns the group id as a string.
            if server['group']['id'] == str(self.host_group_id):
                self.host_group_servers_id.append(server['id'])
            elif server['group']['id'] == str(self.rack_group_id):
                self.rack_group_servers_id.append(server['id'])
        LOG.info('Host group servers: %s' % self.host_group_servers_id)
        LOG.info('Rack group servers: %s' % self.rack_group_servers_id)

    def _get_osd_capacity(self, server_id):
        """Return the used osd capacity of one server."""
        resp = self.osd_client.get_osd_capacity(server_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Query osd capacity failed")
        return resp.get('capacityUsed')

    def _get_osd_capacity_within_group(self, group_tag):
        """
        Sum the used osd capacity over all servers of one group.
        NOTE(review): 'in' here is a substring test ("host_group_" in
        "host_group_"), not equality -- works for the two exact tags
        used, but fragile; confirm before extending.
        """
        total_capacity_used = 0
        if group_tag in 'host_group_':
            for server_id in self.host_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        elif group_tag in 'rack_group_':
            for server_id in self.rack_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        return total_capacity_used

    def test(self):
        """
        1. Create host group with host level, and add 3 hosts to this group
        2. Create host group with rack level, and add 3 other hosts to this group
        3. Deploy cluster
        4. Create pool in host group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        5. Create pool in rack group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        6. check osd capacity is changed only in the osd within r/w group
        7. Rbd migration: migrate rbd from pool 1 to pool 2,
        and execute FIO r/w, check r/w works ok
        8. Down one host from one group, and then w/r data in other group
        check data r/w in other group works ok
        """
        # Step 1: Create host group with host level, and add 3 hosts
        self.host_group_id = self._create_group(self.host_group_name, 'host')
        host_bucket_id = self._create_bucket(self.host_group_id)
        # All three hosts share one bucket (host-level placement).
        self.params['host_group_parent_bucket_1'] = host_bucket_id
        self.params['host_group_parent_bucket_2'] = host_bucket_id
        self.params['host_group_parent_bucket_3'] = host_bucket_id
        self._add_three_hosts("host_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.host_group_name)

        # Step 2: Create host group with rack level, and add 3 hosts
        self.rack_group_id = self._create_group(self.rack_group_name, 'rack')
        # One bucket per host (rack-level placement).
        rack_bucket_id_1 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_2 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_3 = self._create_bucket(self.rack_group_id)
        self.params['rack_group_parent_bucket_1'] = rack_bucket_id_1
        self.params['rack_group_parent_bucket_2'] = rack_bucket_id_2
        self.params['rack_group_parent_bucket_3'] = rack_bucket_id_3
        self._add_three_hosts("rack_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.rack_group_name)

        # Step 3: deploy cluster
        self._deploy_cluster()
        self._get_servers_id()

        # Step 4:create pool in host group, rbd, do FIO r/w, check r/w works ok
        self._copy_fio_package_to_host()
        self.host_group_pool_id, self.host_group_pool_name = \
            self._create_pool(self.host_group_id)
        self.host_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.host_group_rbd_id = self._create_rbd(self.host_group_pool_id,
                                                  self.host_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.host_group_rbd_name, self.host_group_pool_id))
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=True)

        # Step 5:create pool in rack group, rbd, do FIO r/w, check r/w works ok
        self.rack_group_pool_id, self.rack_group_pool_name = \
            self._create_pool(self.rack_group_id)
        self.rack_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.rack_group_rbd_id = self._create_rbd(self.rack_group_pool_id,
                                                  self.rack_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.rack_group_rbd_id, self.rack_group_pool_id))
        # Snapshot the host group usage before writing to the rack group.
        capacity_used_before = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("The previous used capacity is %s" % capacity_used_before)
        self._write_rbd(self.rack_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 6:check osd capacity is changed
        # only in the osd within r/w group
        # NOTE(review): the comparison direction looks suspicious -- it
        # raises when host-group usage DROPPED by >5%, while a write
        # leaking into the host group would make it GROW; confirm intent.
        capacity_used_after = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("Later used capacity is %s" % capacity_used_after)
        if capacity_used_after < capacity_used_before * 0.95:
            raise exceptions.TestFail(
                "Do r/w in the osd of rack group, "
                "affect the used capacity of host group!")

        # Step 7:Rbd migration: migrate rbd from pool 1 to pool 2
        self._migrate_rbd(self.rack_group_pool_id, self.host_group_pool_id,
                          self.rack_group_rbd_id, self.rack_group_rbd_name)
        self._write_rbd(self.host_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 8:Down one host from one group,
        # and then w/r data in other group
        test_utils.delete_osd(self.rack_group_servers_id[0], self.params)
        self.servers_client.delete_server(self.rack_group_servers_id[0])
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=False)

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        if self.fio_working_path is not None:
            # delete files
            # Remove the tarball from the mid host and both the tarball
            # and the unpacked directory from the end host.
            cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath,
                                                  self.fio_version))
            cmd1 = 'pkill fio || true; '
            cmd2 = 'rm -rf %s %s' % (
                os.path.join(self.dstpath, self.fio_version),
                os.path.join(self.dstpath, self.fio_working_path))
            cmd = cmd1 + cmd2
            remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd=cmd,
                cmd_mid=cmd_mid)
        # Both rbds live in the host group pool by now (the rack-group
        # rbd was migrated there in step 7).
        if self.host_group_pool_id and self.host_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.host_group_rbd_id)
        if self.host_group_pool_id and self.rack_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.rack_group_rbd_id)
        if self.host_group_pool_id:
            self.pool_client.delete_pool(self.host_group_pool_id)
        if self.rack_group_pool_id:
            self.pool_client.delete_pool(self.rack_group_pool_id)
Beispiel #16
0
class TestRbdQos(test.Test):
    """
    RBD QoS test.

    Measures fio iops/bandwidth on an rbd with QoS disabled, then enables
    a QoS cap at half of the measured value and verifies the throttled
    result stays within 10% of that cap.  Traffic is generated by running
    fio (rbd ioengine) on a ceph node, reached through the management
    host (local -> mid host -> end host).
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.pool_client = PoolsClient(params)
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        # Directory the tarball unpacks into: strip the '.tar.gz' suffix,
        # e.g. 'fio-2.1.10.tar.gz' -> 'fio-2.1.10'.
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]

    def setup(self):
        # The management url looks like '<scheme>://<ip>[:port]/...'; the
        # second ':'-separated field is '//<ip>', so strip the slashes.
        ceph_server_ip = self.params.get('ceph_management_url')
        self.mid_host_ip = ceph_server_ip.split(':')[1].strip('/')
        self.cluster_id = self.params.get('cluster_id')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')

        self.end_host_ip = test_utils.get_available_host_ip(self.params)
    def test(self):
        # Ship the fio tarball: local -> mid host, then mid host -> end host.
        LOG.info('Copy file %s from local to %s' % (self.fio_version,
                                                    self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' % (self.fio_version,
                                                 self.mid_host_ip,
                                                 self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

        # Create a dedicated pool and an rbd of RBD_CAPACITY to test on.
        self.pool_response = test_utils.create_pool(self.params, flag=True)
        self.pool_name = self.pool_response.get('name')
        self.pool_id = self.pool_response.get('id')

        self.rbd_response = test_utils.create_rbd_with_capacity(self.pool_id,
                                                                self.params,
                                                                RBD_CAPACITY)
        self.rbd_id = self.rbd_response.get('id')
        self.rbd_name = self.rbd_response.get('name')

        self.rbd_client = RbdClient(self.params)

        self.params['rbds_id'] = self.rbd_id
        self.params['pool_id'] = self.pool_id
        self.qos_client = QosClient(self.params)

        # flag=True only on the first run so fio is untarred exactly once;
        # the later runs reuse the unpacked tree.
        self.__test_operation(property_type='iops', rw='randwrite', flag=True)
        self.__test_operation(property_type='iops', rw='randread')
        self.__test_operation(property_type='iops', rw='randrw',
                              rw_type='rwmixread', rw_value=70)
        self.__test_operation(property_type='bw', rw='randwrite')
        self.__test_operation(property_type='bw', rw='randread')
        self.__test_operation(property_type='bw', rw='randrw',
                              rw_type='rwmixread', rw_value=70)

    def __test_operation(self, property_type, rw, rw_type=None, rw_value=None, flag=False):
        """
        Run one qos scenario: measure unthrottled, cap qos at half the
        measured value, re-measure and check the throttled result.

        :param property_type: 'iops' or 'bw'
        :param rw: fio mode: randread/read/randwrite/write/randrw/rw
        :param rw_type: 'rwmixread' when rw is randrw/rw, else None
        :param rw_value: rwmixread percentage, e.g. 70 means 7:3 r/w
        :param flag: True only on the first call (untar fio first)
        """
        self.__disable_qos()
        # Small blocks / shallow queue for iops runs; large blocks and a
        # deeper queue for bandwidth runs.
        bs = '8k'
        iodepth = 128
        if 'bw' in property_type:
            bs = '1024k'
            iodepth = 512
        LOG.info('******%s rbd before enable qos!******' % rw)
        stdout_msg = self.__write_rbd(rw=rw, rw_type=rw_type,
                                      rw_value=rw_value, bs=bs,
                                      iodepth=iodepth, runtime=120,
                                      flag=flag)
        result_before = self.__get_iops_or_bw(stdout_msg, property_type)
        # Cap qos at half of the unthrottled result.
        body = self.__get_qos_body(operation=rw,
                                   property_type=property_type,
                                   value=result_before/2)
        self.__enable_qos(body)
        LOG.info('******%s rbd after enable qos!******' % rw)
        stdout_msg = self.__write_rbd(rw=rw, rw_type=rw_type,
                                      rw_value=rw_value, bs=bs,
                                      iodepth=iodepth, runtime=120)
        result_after = self.__get_iops_or_bw(stdout_msg, property_type)
        self.__check_result(result_before/2, result_after)

    @staticmethod
    def __check_result(before, after):
        """
        Compare the throttled measurement against the configured qos cap;
        log an error if they differ by more than 10%.

        :param before: the qos cap (half of the unthrottled value)
        :param after: the value measured after qos was enabled
        """
        # NOTE(review): raises ZeroDivisionError if after == 0 (e.g. a
        # fully stalled run) — a guard may be wanted; confirm intent.
        temp_value = abs(before - after)
        result = temp_value*100/after
        if result > 10:
            LOG.error('******IOPS Value %s after enabled qos is greater than 10 '
                'percentage, compare with IOPS value %s before enabled qos!******'
                      % (after, before))
        else:
            LOG.info('******Check result successfully******')

    def __get_qos_body(self, operation, property_type, value):
        """
        Build the request body for a qos update.

        :param operation: randread/read/randwrite/write/randrw/rw
        :param property_type: 'iops' or 'bw'
        :param value: value parsed from fio output; for 'bw' it is in the
                      unit stored in self.bw_unit by __get_iops_or_bw
        :return: dict body for QosClient.enable()
        """
        if 'bw' in property_type:
            # Convert the fio-reported unit to bytes/s for the API; relies
            # on self.bw_unit set by the preceding __get_iops_or_bw call.
            if 'KB/s' in self.bw_unit:
                TIMES = 1024
            elif 'MB/s' in self.bw_unit:
                TIMES = 1024*1024
            else:
                raise exceptions.TestFail('Network is too slow '
                                          'because qos cannot set '
                                          'bw value under 100M!')
            # 'randrw'/'rw' contain neither 'read' nor 'write' and fall
            # through to the combined limit.
            if 'read' in operation:
                return {"rbw": value*TIMES}
            elif 'write' in operation:
                return {"wbw": value*TIMES}
            else:
                return {"bw": value*TIMES}
        else:
            if 'read' in operation:
                return {"riops": value}
            elif 'write' in operation:
                return {"wiops": value}
            else:
                return {"iops": value}

    def __disable_qos(self):
        # Disable qos and fail fast if the API does not report success.
        LOG.info('******Disable qos******')
        resp = self.qos_client.disable()
        body = resp.body
        if not body.get('success'):
            raise exceptions.TestFail("Disable qos failed: %s" % body)

    def __enable_qos(self, body):
        # Enable qos with the given limits and verify the API echoed them.
        LOG.info('******Enable qos******')
        resp = self.qos_client.enable(**body)
        if not utils.verify_response(body, resp.body.get('results')):
            raise exceptions.TestFail("Enable qos failed: %s" % body)

    def __get_iops_or_bw(self, msg, value_type='iops'):
        """
        Parse total iops or bandwidth out of fio stdout.

        fio summary lines look like
        'read : io=..., bw=..., iops=..., runt=...' (same for 'write:').
        Side effect: for value_type 'bw' the unit suffix (e.g. 'KB/s') is
        stored in self.bw_unit for later conversion.

        :param msg: raw stdout produced by fio
        :param value_type: 'iops' or 'bw'
        :return: int total over whichever read/write lines were found
        """
        iops_read = 0
        iops_write = 0
        bw_read = 0
        bw_write = 0
        pat_read = 'read : (.*)'
        pat_write = 'write: (.*)'
        read_msg_list = re.findall(pat_read, msg)
        write_msg_list = re.findall(pat_write, msg)
        if not len(read_msg_list) and not len(write_msg_list):
            raise exceptions.TestError('Msg data or pattern error, '
                                       'please check!')
        if 'bw' in value_type:
            # Second comma field is 'bw=<number><unit>'; the last four
            # characters are the unit (e.g. 'KB/s').
            if len(read_msg_list):
                temp = read_msg_list[0].split(',')[1].split('=')[1]
                bw_read = temp[:-4]
                self.bw_unit = temp[-4:]
            if len(write_msg_list):
                temp = write_msg_list[0].split(',')[1].split('=')[1]
                bw_write = temp[:-4]
                self.bw_unit = temp[-4:]
            bw_total = int(float(bw_read)) + int(float(bw_write))
            if bw_write and bw_read:
                return bw_total
            elif bw_read:
                return int(float(bw_read))
            else:
                return int(float(bw_write))
        else:
            # Third comma field is 'iops=<number>'.
            if len(read_msg_list):
                iops_read = read_msg_list[0].split(',')[2].split('=')[1]
            if len(write_msg_list):
                iops_write = write_msg_list[0].split(',')[2].split('=')[1]
            iops_total = int(iops_read) + int(iops_write)
            if iops_read and iops_write:
                return int(iops_total)
            elif iops_read:
                return int(iops_read)
            else:
                return int(iops_write)

    def __get_pool_name_and_id(self):
        # NOTE(review): not referenced anywhere in this class (test()
        # creates its own pool); apparently kept for parity with sibling
        # tests — confirm before removing.
        pools = self.pool_client.query()
        if not len(pools):
            raise exceptions.TestSetupFail('No pool found!')
        self.pool_id = pools[0]['id']
        self.pool_name = pools[0]['name']

    def __write_rbd(self, rw, rw_type, rw_value, bs, iodepth, runtime,
                    flag=False):
        """
        Run fio (rbd ioengine) against self.rbd_name on the end host and
        return its stdout.

        :param rw: randread/read/randwrite/write/randrw/rw
        :param rw_type: 'rwmixread' or None
        :param rw_value: value for rw_type
        :param bs: fio block size, e.g. '8k'
        :param iodepth: fio queue depth
        :param runtime: fio runtime in seconds
        :param flag: True to untar the fio tarball before running
        :return: fio stdout as a string
        """
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin -pool=%s ' % \
               self.pool_name
        if rw_type and rw_value:
            cmd3 = '-rw=%s -%s=%s -bs=%s -iodepth=%s -numjobs=1 -direct=1 ' % \
                   (rw, rw_type, rw_value, bs, iodepth)
        else:
            cmd3 = '-rw=%s -bs=%s -iodepth=%s -numjobs=1 -direct=1 ' % \
                   (rw, bs, iodepth)

        cmd4 = '-runtime=%s -group_reporting -rbdname=%s -name=mytest' % \
               (runtime, self.rbd_name)
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        out_msg = remote.run_cmd_between_remotes(
             mid_host_ip=self.mid_host_ip,
             mid_host_user=self.mid_host_user,
             mid_host_password=self.mid_host_password,
             end_host_ip=self.end_host_ip,
             end_host_user=self.end_host_user,
             end_host_password=self.end_host_password,
             cmd=cmd,
             timeout=1000)
        return out_msg

    def teardown(self):
        # Remove the fio tarball from the mid host, and the tarball plus
        # the unpacked tree from the end host (killing any stray fio).
        cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath, self.fio_version))
        cmd1 = 'pkill fio || true; '
        cmd2 = 'rm -rf %s %s' % (os.path.join(self.dstpath, self.fio_version),
                                 os.path.join(self.dstpath, self.fio_working_path))
        cmd = cmd1 + cmd2
        remote.run_cmd_between_remotes(mid_host_ip=self.mid_host_ip,
                                       mid_host_user=self.mid_host_user,
                                       mid_host_password
                                       =self.mid_host_password,
                                       end_host_ip=self.end_host_ip,
                                       end_host_user=self.end_host_user,
                                       end_host_password
                                       =self.end_host_password,
                                       cmd=cmd,
                                       cmd_mid=cmd_mid)
Beispiel #17
0
class ClusterMaintenance(test.Test):
    """
    Host-maintenance test: put a server into maintenance, check that its
    osds, monitor and sds-agent go down, write an rbd via fio, then stop
    maintenance, check everything comes back, write again and verify the
    osds actually absorbed data (available capacity did not grow).
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.cluster_client = ClustersClient(params)
        self.server_client = ServersClient(params)
        self.monitor_client = MonitorsClient(params)
        self.pool_client = PoolsClient(params)
        self.osd_client = OsdClient(params)
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        # Directory the tarball unpacks into ('.tar.gz' suffix stripped).
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]

    def setup(self):
        # Management url is '<scheme>://<ip>...'; field 1 of the ':' split
        # is '//<ip>', so strip the slashes to get the mid host ip.
        ceph_server_ip = self.params.get('ceph_management_url')
        self.mid_host_ip = ceph_server_ip.split(':')[1].strip('/')
        self.cluster_id = self.params.get('cluster_id')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        # NOTE(review): attribute name is misspelled ('passwprd') but is
        # used consistently throughout this class.
        self.end_host_passwprd = self.params.get('ceph_node_ssh_password')

        self.end_host_ip = test_utils.get_available_host_ip(self.params)

    def test(self):
        """
        1. start maintenance
        2. check osd, mon and agent status (expected down/inactive/dead)
        3. run fio against a freshly created rbd
        4. record osd capacity and wait 300s
        5. stop maintenance, re-check osd, mon and agent status
        6. run fio again
        7. verify osd available capacity did not increase
        """
        self.__copy_file()
        self.__get_available_server()
        self.__start_maintenance()
        status = self.__wait_for_osd_in_status('down')
        if not status:
            raise exceptions.TestFail(
                'Osd status should be down, please check!')
        time.sleep(10)
        self.__check_monitor_status(status='inactive')
        self.__check_service_status(cmd='systemctl status sds-agent',
                                    pat='Active: (.*)',
                                    service_type='agent',
                                    status='dead')
        self.__get_pool_name_and_id()
        self.__create_rbd()
        # flag=True untars fio on the first run only.
        self.__write_rbd(flag=True)
        osd_dict_before = self.__get_osd_capacity()
        LOG.info('Begin to sleep 300s ...')
        time.sleep(300)

        self.__stop_maintenance()
        status = self.__wait_for_osd_in_status(status='up')
        if not status:
            raise exceptions.TestFail('Osd status should be up, please check!')
        time.sleep(10)
        self.__check_monitor_status(status='active')
        self.__check_service_status(cmd='systemctl status sds-agent',
                                    pat='Active: (.*)',
                                    service_type='agent',
                                    status='running')
        self.__create_rbd()
        self.__write_rbd()
        osd_dict_after = self.__get_osd_capacity()
        self.__check_osd_capacity(osd_dict_before, osd_dict_after)

    def __copy_file(self):
        # Ship the fio tarball: local -> mid host, then mid host -> end host.
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_passwprd,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

    def __get_osd_capacity(self):
        """
        Get per-osd available capacity via 'ceph osd df'.

        Parses each output row whose first column is the numeric osd id;
        column 7 (index 6) is taken as the AVAIL figure.  Uses
        self.osd_list populated by __wait_for_osd_in_status.

        :return: dict mapping osd id -> available capacity (float)
        """
        osd_dict = {}
        stdout_msg = remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_passwprd,
            cmd='ceph osd df',
            timeout=1000)
        stdout_msg = stdout_msg.strip()
        msg_list = stdout_msg.split('\n')
        for osd in self.osd_list:
            osd_id = osd.get('osdId')
            for msg in msg_list:
                msg = msg.strip()
                msg = msg.split()
                # Skip header/summary rows: only rows starting with the
                # osd id are data rows.
                if msg[0].isdigit() and int(msg[0]) == osd_id:
                    osd_dict[osd_id] = float(msg[6])
        return osd_dict

    @staticmethod
    def __check_osd_capacity(osd_dict_before, osd_dict_after):
        """
        Verify each osd's AVAIL did not grow between the two fio runs
        (growing AVAIL would mean the osd lost the written data).

        :param osd_dict_before: osd id -> AVAIL before the second fio run
        :param osd_dict_after: osd id -> AVAIL after the second fio run
        """
        for key in osd_dict_before.keys():
            if osd_dict_after[key] > osd_dict_before[key]:
                raise exceptions.TestFail('Osd AVAIL increased!')

    def __get_available_server(self):
        # Pick the server that will be put into maintenance.
        self.server_id = test_utils.get_available_server(self.params)

    def __start_maintenance(self):
        LOG.info('Start host maintenance...')
        self.server_client.start_maintenance(self.server_id)

    def __stop_maintenance(self):
        LOG.info('Stop host maintenance...')
        self.server_client.stop_maintenance(self.server_id)

    def __get_pool_name_and_id(self):
        # Use the first existing pool for the rbd writes.
        pools = self.pool_client.query()
        if not len(pools):
            raise exceptions.TestSetupFail('No pool found!')
        self.pool_id = pools[0]['id']
        self.pool_name = pools[0]['name']

    def __create_rbd(self):
        # Create a fresh rbd of RBD_CAPACITY for the fio run.
        resp = test_utils.create_rbd_with_capacity(self.pool_id, self.params,
                                                   RBD_CAPACITY, True)
        self.rbd_id = resp.get('id')
        self.rbd_name = resp.get('name')

    def __check_osd_status(self, status):
        # NOTE(review): not called by test() — __wait_for_osd_in_status is
        # used instead; apparently kept as a one-shot variant.
        LOG.info('Check osd status ...')
        resp = self.osd_client.get_osd_capacity(self.server_id)
        self.osd_list = resp['osds']
        for i in range(len(self.osd_list)):
            osd = self.osd_list[i]
            osd_name = osd['osdName']
            if osd.get('osdStatus') not in status:
                raise exceptions.TestFail(
                    'Osd %s status error(status: %s), '
                    'status should be %s' %
                    (osd_name, osd.get('osdStatus'), status))
        LOG.info('Check osd status pass !')

    def __wait_for_osd_in_status(self, status):
        """
        Poll (every 30s, up to 360s) until every osd on the server is in
        *status*.  Side effect: refreshes self.osd_list.

        :param status: expected osd status, e.g. 'up' or 'down'
        :return: truthy on success, falsy on timeout
        """
        def is_in_status():
            resp = self.osd_client.get_osd_capacity(self.server_id)
            self.osd_list = resp['osds']
            for i in range(len(self.osd_list)):
                osd = self.osd_list[i]
                if osd['osdStatus'] not in status:
                    return False
            return True

        return utils_misc.wait_for(is_in_status,
                                   timeout=360,
                                   first=0,
                                   step=30,
                                   text='Waiting for osd in status!')

    def __check_monitor_status(self, status):
        # Verify the monitor on the maintained server is in *status*.
        LOG.info('Check monitor status ...')
        resp = self.monitor_client.query(self.cluster_id, self.server_id)
        if len(resp) == 0:
            raise exceptions.TestFail('No minitor on server %s.' %
                                      self.server_id)
        if resp[0]['state'] not in status:
            raise exceptions.TestFail('Monitor state should be %s not %s' %
                                      (status, resp[0]['state']))

    def __check_service_status(self, cmd, pat, service_type, status=None):
        """
        Run *cmd* on the end host and check its output with regex *pat*.

        For service_type 'agent' the first match must contain *status*;
        for any other service_type a match at all is treated as ceph
        having entered recovery mode.

        :param cmd: shell command to run remotely
        :param pat: regex applied to the command's stdout
        :param service_type: 'agent' or anything else
        :param status: expected substring of the match (agent only)
        """
        stdout_msg = remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_passwprd,
            cmd=cmd,
            timeout=1000)
        result = re.findall(pat, stdout_msg)
        if 'agent' in service_type:
            if len(result) != 0:
                if status in result[0]:
                    return
                raise exceptions.TestFail('Agent status error !')
        else:
            if len(result) != 0:
                raise exceptions.TestFail('Ceph goes to recovery mode!')

    def __write_rbd(self, flag=False):
        """
        Run a short fio write (1M, rbd ioengine) against self.rbd_name.

        :param flag: True to untar the fio tarball before running
        """
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin '
        cmd3 = '-pool=%s -rw=write -bs=1M -iodepth=128 -numjobs=1 -direct=1 ' % \
               self.pool_name
        cmd4 = '-size=1M -group_reporting -rbdname=%s -name=mytest' % \
               self.rbd_name
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_passwprd,
            cmd=cmd,
            timeout=1000)

    def teardown(self):
        # Remove the fio tarball from the mid host, and the tarball plus
        # the unpacked tree from the end host (killing any stray fio).
        cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath, self.fio_version))
        cmd1 = 'pkill fio || true; '
        cmd2 = 'rm -rf %s %s' % (os.path.join(self.dstpath, self.fio_version),
                                 os.path.join(self.dstpath,
                                              self.fio_working_path))
        cmd = cmd1 + cmd2
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_passwprd,
            cmd=cmd,
            cmd_mid=cmd_mid)
Beispiel #18
0
def delete_pool(pool_id, params):
    """
    Delete the pool identified by *pool_id* through the pools REST client.

    :param pool_id: id of the pool to remove
    :param params: test params used to construct the client
    :return: the client's delete response
    """
    LOG.info("Try to delete pool: %s", pool_id)
    client = PoolsClient(params)
    return client.delete_pool(pool_id)
Beispiel #19
0
class TestPools(test.Test):
    """
    Pools related tests: create (replicated or EC), query, set/unset EC
    cache tier, update and delete, driven through the pools REST client.
    """
    def __init__(self, params, env):
        self.params = params
        self.client = PoolsClient(params)
        # Collected 'rest_arg_*' params, used for response verification.
        self.body = {}
        self.env = env

    def setup(self):
        """
        Set up before executing test: resolve the cluster id and collect
        all 'rest_arg_*' params (stripped of the prefix) into self.body.
        """
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')

        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v

    def test_create(self):
        """
        Execute the test of creating a pool.

        Creates a replicated pool when param NO_EC is "true", otherwise an
        erasure-coded pool; stores the new pool id in env['pool_tmp_id']
        and waits until the pool is reported as created.
        """
        test_utils.update_env_vgroup(self.params, self.env)
        pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        if self.params.get('NO_EC', "true") == "true":
            LOG.info("Try to create NO_EC pool")
            create_pool = {
                'name': pool_name,
                'size': self.params.get('rest_arg_size', 3),
                'group_id': self.params.get('rest_arg_group_id', 1),
                'pg_num': self.params.get('rest_arg_pg_num', 448),
                'vgroup_id': self.env.get('vgroup_id', 1)
            }
        else:
            LOG.info("Try to create EC pool")
            create_pool = {
                'name': pool_name,
                'group_id': self.params.get('rest_arg_group_id', 1),
                'pg_num': self.params.get('rest_arg_pg_num', 448),
                'vgroup_id': self.env.get('vgroup_id', 1),
                'safe_type': self.params.get('safe_type', 0),
                'data_block_num': self.params.get('data_block_num', 3),
                'code_block_num': self.params.get('code_block_num', 0),
                'min_size': self.params.get('min_size', 1),
                'max_bytes': self.params.get("max_bytes", 486547056640),
                'write_mode': self.params.get("write_mode", "writeback"),
            }
        resp = self.client.create(**create_pool)
        LOG.info('Rest Response: %s' % resp)
        # NOTE(review): 'not resp and verify_response(...)' can only be
        # true for a falsy resp — 'or not' was probably intended; the same
        # pattern recurs below. Confirm against verify_response semantics.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Create pool failed: %s" % self.body)
        self.env['pool_tmp_id'] = resp.body['properties']['context']['pool_id']

        status = test_utils.wait_for_pool_created(self.client, pool_name)
        if not status:
            raise exceptions.TestFail('Failed to create pool %s' % pool_name)
        LOG.info('Create pool %s successfully !' % pool_name)

    def test_query(self):
        # Test query pools in a specified cluster; expects at least one.
        resp = self.client.query()
        if not len(resp) > 0:
            raise exceptions.TestFail("Query pools failed")
        LOG.info("Got all pools: %s" % resp)

    def test_set_ec_pool_cache(self):
        """
        Set up a cache tier for the EC pool created by test_create.

        Creates a fresh cache pool, remembers its id/name in env for the
        later unset test, and attaches it in writeback mode.  Skipped for
        non-EC runs.
        """
        pool_id = self.env.get('pool_tmp_id')
        vgroup_id = self.env.get('vgroup_id', 1)
        cache_pool = test_utils.create_pool(self.params,
                                            flag=True,
                                            vgroup_id=vgroup_id)
        self.env['cache_pool_id'] = cache_pool.get('pool_id')
        self.env['cache_pool_name'] = cache_pool.get('name')

        if self.params.get('NO_EC', "true") == "true":
            raise exceptions.TestSkipError("There is not EC pool")
        else:
            set_cache = {
                'cache_pool_id': cache_pool.get('pool_id'),
                'cache_pool_name': cache_pool.get('name'),
                'cache_size': 107374182400,
                'target_dirty_radio': 30,
                'target_full_radio': 70,
                'option': 'set_cache',
                'caching_mode': 'writeback',
            }

        resp = self.client.set_cache(pool_id, **set_cache)
        LOG.info('Rest Response: %s' % resp)
        # NOTE(review): same suspicious 'not resp and ...' condition as in
        # test_create.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Set up EC pool cache failed: %s" %
                                      self.body)

    def test_unset_ec_pool_cache(self):
        """
        Unset the cache tier attached by test_set_ec_pool_cache, using the
        cache pool id/name stored in env.  Skipped for non-EC runs.
        """
        pool_id = self.env.get('pool_tmp_id')

        if self.params.get('NO_EC', "true") == "true":
            raise exceptions.TestSkipError("There is not EC pool")
        else:
            unset_cache = {
                'cache_pool_id': self.env['cache_pool_id'],
                'cache_pool_name': self.env['cache_pool_name'],
                'option': 'unset_cache',
            }

        resp = self.client.unset_cache(pool_id, **unset_cache)
        LOG.info('Rest Response: %s' % resp)
        # NOTE(review): same suspicious 'not resp and ...' condition as in
        # test_create.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Unset up EC pool cache failed: %s" %
                                      self.body)

    def test_update(self):
        """
        Execute the test of updating a pool.

        Renames the pool from test_create and pushes new settings
        (replicated or EC depending on NO_EC).
        """
        pool_id = self.env.get('pool_tmp_id')
        pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        if self.params.get('NO_EC', "true") == "true":
            update_pool = {
                'name': pool_name,
                'size': self.params.get('rest_arg_size', 3),
                'group_id': self.params.get('rest_arg_group_id', 1),
                'pg_num': self.params.get('rest_arg_pg_num', 600),
                'vgroup_id': self.env.get('vgroup_id', 1)
            }
        else:
            update_pool = {
                'name': pool_name,
                'group_id': self.params.get('rest_arg_group_id', 1),
                'pg_num': self.params.get('rest_arg_pg_num', 448),
                'vgroup_id': self.env.get('vgroup_id', 1),
                'safe_type': self.params.get('safe_type', 0),
                'data_block_num': self.params.get('data_block_num', 3),
                'code_block_num': self.params.get('code_block_num', 0),
                'min_size': self.params.get('min_size', 1),
                'max_bytes': self.params.get("max_bytes", 1073741824),
                'write_mode': self.params.get("write_mode", "writeback"),
            }
        resp = self.client.update(pool_id, **update_pool)
        LOG.info('Rest Response: %s' % resp)
        # NOTE(review): same suspicious 'not resp and ...' condition as in
        # test_create.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Update pool failed: %s" % self.body)

    def test_delete(self):
        """
        Test that deletion of specified cluster's pool succeeds.
        """
        # sleep 120s, otherwise it may raise error about "the pool is not ready"
        time.sleep(120)
        pool_id = self.env.get('pool_tmp_id')
        LOG.info("Try to delete pool with ID: %s" % pool_id)
        self.client.delete_pool(pool_id)
        resp = self.client.query()
        # The deleted pool must no longer appear in the listing.
        for i in range(len(resp)):
            if resp[i]['id'] == pool_id:
                raise exceptions.TestFail("Delete pools failed")

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        pass