Example #1
def create_rbd_with_capacity(pool_id, params, capacity, flag=True):
    """
      Prepare env for testing, this method is to create rbd in the pool

      :param params: the dict-like parameter
      :param flag: True/False, with different return value
    """

    rbd_client = RbdClient(params)
    rbd_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    LOG.info("Try to create rbd %s" % rbd_name)
    create_rbd = {
        'name': rbd_name,
        'object_size': params.get('object_size', 10),
        'capacity': params.get('capacity', capacity),
        'num': params.get('num', 1),
        'shared': params.get('shared', 0)
    }
    rbd_client.create(pool_id, **create_rbd)
    status = wait_for_rbd_create(rbd_client, pool_id, rbd_name)
    if not status:
        raise exceptions.TestFail('Failed to create rbd %s!' % rbd_name)
    resp = rbd_client.query(pool_id)
    for i in range(len(resp)):
        if resp[i]['name'] == rbd_name:
            if flag:
                return resp[i]
            else:
                return resp[i]['id']
    raise exceptions.TestError('Created rbd %s not found!' % rbd_name)
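A minimal usage sketch of the helper above (hypothetical values; it assumes params is the dict-like test configuration and that test_utils.create_pool returns a pool id, as in the later examples):

# Hypothetical usage: create a pool, then a 10 MiB rbd inside it.
pool_id = test_utils.create_pool(params)
rbd_info = create_rbd_with_capacity(pool_id, params, 10 * 1024 * 1024)
LOG.info('Created rbd %s with id %s' % (rbd_info['name'], rbd_info['id']))
# With flag=False only the rbd id is returned:
rbd_id = create_rbd_with_capacity(pool_id, params, 10 * 1024 * 1024, flag=False)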
Example #2
    def setup(self):
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')
        else:
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) > 0:
                self.cluster_id = clusters[0]['id']
        self.params['cluster_id'] = self.cluster_id
        self.pool_client = PoolsClient(self.params)

        if 'pool_name' in self.env:
            self.pool_name = self.env['pool_name']
        else:
            self.pool_name = self.params.get('pool_name', 'rbd')
        self.params['pool_name'] = self.pool_name
        if self.pool_name is not None:
            resp = self.pool_client.query()
            for i in range(len(resp)):
                if resp[i]['name'] == self.pool_name:
                    self.pool_id = resp[i]['id']
        else:
            self.pool_id = test_utils.create_pool(self.params)
            LOG.info("Created pool that id is %s" % self.pool_id)
        self.params['pool_id'] = self.pool_id

        self.rbd_client = RbdClient(self.params)
        self.iscsi_client = ISCSIClient(self.params)
Example #3
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)

        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None
Example #4
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.rbd_client = RbdClient(params)
        self.snapshot_client = SnapshotsClient(params)
        self.iscsi_client = ISCSIClient(params)

        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password')
        self.initiator_username = self.params.get('ceph_node_ssh_username')
        self.initiator_password = self.params.get('ceph_node_ssh_password')

        self.rbd_id = None
        self.snapshot_id = None
        self.target_id = None
        self.lun_id = None
Example #5
    def setup(self):
        """
        Set up before executing test
        """
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')
        LOG.info("Created cluster successfully!")
        self.params['cluster_id'] = self.cluster_id
        self.servers_client = ServersClient(self.params)
        self.group_client = GroupsClient(self.params)
        self.pool_client = PoolsClient(self.params)
        self.rbd_client = RbdClient(self.params)
        self.osd_client = OsdClient(self.params)
Example #6
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.body = {}
        self.rbd_client = RbdClient(params)
        self.iscsi_client = ISCSIClient(self.params)
        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password')
        self.initiator_username = self.params.get('ceph_node_ssh_username')
        self.initiator_password = self.params.get('ceph_node_ssh_password')
        self.cluster_id = None
        self.pool_id = None
        self.target_pool = None
        self.rbd_id = None
        self.rbd_name = None
        self.iscsi_id = None
        self.initiator_ip = None
        self.iscsi_to_delete = []
        self.env['pool_tmp_id'] = None
Example #7
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.cluster_client = ClustersClient(params)
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)
        self.server_client = ServersClient(params)
        self.pool_id_before = None
        self.pool_name_before = None
        self.rbd_name_before = None
        self.pool_id_after = None
        self.pool_name_after = None
        self.rbd_name_after = None
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None

        self.target_pool = None
        self.rbd_id = None
        self.server_name = None
        self.server_id = None
Example #8
    def test(self):
        LOG.info('Copy file %s from local to %s' % (self.fio_version,
                                                    self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' % (self.fio_version,
                                                 self.mid_host_ip,
                                                 self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

        self.pool_response = test_utils.create_pool(self.params, flag=True)
        self.pool_name = self.pool_response.get('name')
        self.pool_id = self.pool_response.get('id')

        self.rbd_response = test_utils.create_rbd_with_capacity(self.pool_id,
                                                                self.params,
                                                                RBD_CAPACITY)
        self.rbd_id = self.rbd_response.get('id')
        self.rbd_name = self.rbd_response.get('name')

        self.rbd_client = RbdClient(self.params)

        self.params['rbds_id'] = self.rbd_id
        self.params['pool_id'] = self.pool_id
        self.qos_client = QosClient(self.params)

        self.__test_operation(property_type='iops', rw='randwrite', flag=True)
        self.__test_operation(property_type='iops', rw='randread')
        self.__test_operation(property_type='iops', rw='randrw',
                              rw_type='rwmixread', rw_value=70)
        self.__test_operation(property_type='bw', rw='randwrite')
        self.__test_operation(property_type='bw', rw='randread')
        self.__test_operation(property_type='bw', rw='randrw',
                              rw_type='rwmixread', rw_value=70)
Example #9
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.rbd_client = RbdClient(params)
        self.iscsi_client = ISCSIClient(params)

        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password')

        self.initiator_username = self.params.get('ceph_node_ssh_username')
        self.initiator_password = self.params.get('ceph_node_ssh_password')

        self.old_config_list = []
        self.old_config_list.append(
            self.params.get('iscsi_config_authmethod',
                            '#node.session.auth.authmethod = CHAP'))
        self.old_config_list.append(
            self.params.get('iscsi_config_username',
                            '#node.session.auth.username = username'))
        self.old_config_list.append(
            self.params.get('iscsi_config_password',
                            '#node.session.auth.password = password'))
        self.old_config_list.append(
            self.params.get('iscsi_config_username_in',
                            '#node.session.auth.username_in = username_in'))
        self.old_config_list.append(
            self.params.get('iscsi_config_password_in',
                            '#node.session.auth.password_in = password_in'))
        self.iscsi_config_file = self.params.get('iscsi_config_file',
                                                 r'/etc/iscsi/iscsid.conf')
        self.rbd_id = None
        self.target_id = None
        self.lun_id = None
        self.target_ip = None
Example #10
class TestRBDMigration(test.Test):
    """
    Module for rbd migration
    """

    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.body = {}
        self.rbd_client = RbdClient(params)
        self.iscsi_client = ISCSIClient(self.params)
        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password')
        self.initiator_username = self.params.get('ceph_node_ssh_username')
        self.initiator_password = self.params.get('ceph_node_ssh_password')
        self.cluster_id = None
        self.pool_id = None
        self.target_pool = None
        self.rbd_id = None
        self.rbd_name = None
        self.iscsi_id = None
        self.initiator_ip = None
        self.iscsi_to_delete = []
        self.env['pool_tmp_id'] = None

    def setup(self):
        """Set up before execute test"""
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')

        if self.params.get("pool_id"):
            self.pool_id = self.params.get('pool_id')
        else:
            self.pool_id = test_utils.create_pool(self.params)
            LOG.info("pool_id id %s " % self.pool_id)

        if self.params.get('initiator_ip'):
            self.initiator_ip = self.params.get('initiator_ip')
        else:
            self.initiator_ip = test_utils.get_available_host_ip(self.params)

        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v

    def _create_rbd(self):
        RBD_CAPACITY = 1024 * 1024
        self.pool_id = test_utils.create_pool(self.params)
        self.rbd_response = test_utils.create_rbd_with_capacity(self.pool_id,
                                                                self.params,
                                                                RBD_CAPACITY)
        self.rbd_id = self.rbd_response.get('id')
        self.rbd_name = self.rbd_response.get('name')

    def _write_to_rbd(self):
        """This part is common to three cases; reuse the shared helper.
        Write data to this rbd via iSCSI (a command sketch follows this example):
           1) Create an iscsi target > bind it to the rbd
           2) On the target: tgtadm --lld iscsi --mode target --op show
           3) On the initiator: iscsiadm -m discovery -t st -p 127.0.0.1 (target address)
           4) iscsiadm -m node -T iqn.2017-04.com.lenovo:devsdb6 -p 127.0.0.1 (target address) --login
           5) An iscsi device (like sda) appears in the initiator device list via "lsblk"
           6) mkfs -t ext3 -c /dev/sda
           7) mount /dev/sda /mnt
           8) Create a new file (e.g. testfile.txt) in /mnt > write data to this file
           9) umount /mnt
           10) iscsiadm -m node -T iqn.2017-04.com.lenovo:devsdb6 -p 127.0.0.1 (target address) --logout
           11) Unbind iscsi from the rbd"""
        self._redo_some_step(create_iscsi=True, need_mk=True,
                             create_data=True)

    def __create_iscsi(self):
        self.iscsi_name = 'iscsi' + \
                          utils.utils_misc.generate_random_string(6)
        body = {'initiator_ips': self.initiator_ip,
                'target_name': self.iscsi_name,
                'multipath': self.params.get('multipath', '1')}
        LOG.info("Try to create iscsi %s" % self.iscsi_name)
        resp = self.iscsi_client.create(**body)
        # LOG.info("Try to create resp %s" % resp)
        time.sleep(30)
        if resp.response['status'] == '200':
            self.iscsi_id = resp.body['target_id']
            self.iscsi_to_delete.append(self.iscsi_id)
            LOG.info('Create iscsi target succeeded: %s!' % self.iscsi_id)
            return
        raise exceptions.TestFail("Create iscsi target failed!")

    def __bind_iscsi(self):
        LOG.info("Start  bind iscsi to rbd ! ")
        body = {'target_id': self.iscsi_id,
                'pool_id': self.pool_id,
                'rbd_id': self.rbd_id}

        resp = self.iscsi_client.add_lun(**body)
        time.sleep(20)
        # LOG.info("Add lun info resp %s" % resp)
        if resp.response['status'] != '200':
            raise exceptions.TestFail("Bind iscsi to  failed %s!" % resp)
        self.lun_id = resp.body['lun_id']
        LOG.info("Bind iscsi to rbd info success!")

    def __unbind_iscsi(self):
        LOG.info("Start  unbind iscsi to rbd ! ")
        body = {
            'target_id': self.iscsi_id,
            'lun_id': self.lun_id}
        resp = self.iscsi_client.delete_lun(**body)
        if resp.response['status'] != '200':
            raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
        LOG.info('Unbind iscsi succeeded!')

    def _migrate_rbd(self):
        """Migrate this rbd to target pool"""
        LOG.info("Start migrate rbd to new pool!")
        self.target_pool = test_utils.create_pool(self.params)
        move_rbd = {'target_pool': str(self.target_pool)}
        resp = self.rbd_client.migrate(self.pool_id, self.rbd_id, **move_rbd)
        # LOG.info('Rest Response: %s' % resp)
        time.sleep(60)
        if resp.response['status'] != '200':
            raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
        self.env['pool_tmp_id'] = self.target_pool
        self.pool_id, self.target_pool = self.target_pool, self.pool_id
        LOG.info("Migrate rbd to new pool success!")

    def _redo_some_step(self, create_iscsi=False, need_mk=False,
                        create_data=False):
        LOG.info("Start repeat some steps in case2,create_iscsi:%s "
                 "need_mk:%s create_data:%s " % (
                     create_iscsi, need_mk, create_data))
        if create_iscsi:
            self.__create_iscsi()
        self.__bind_iscsi()
        LOG.info("Start write data to iscsi via rbd !")
        mount_point = self.params.get('mount_point', '/mnt')
        file_name = self.params.get('file_name', 'testfile.txt')
        find = test_utils.operate_iscsi(self.control_server_ip,
                                        self.control_username,
                                        self.control_password,
                                        self.initiator_ip,
                                        self.initiator_username,
                                        self.initiator_password,
                                        self.iscsi_name,
                                        self.initiator_ip, mount_point,
                                        file_name, need_mk, create_data)
        if find:
            LOG.info("Find %s under %s!" % (file_name, mount_point))
        else:
            LOG.error("%s not found under %s" % (file_name, mount_point))
        time.sleep(20)
        self.__unbind_iscsi()
        time.sleep(20)

    def _migrate_rbd_back(self):
        self.pool_id, self.target_pool = self.target_pool, self.pool_id
        LOG.info("Start migrate rbd back to old pool!")
        rbd_id = self.rbd_id
        target_pool = self.pool_id
        pool_id = self.env['pool_tmp_id']
        move_rbd = {'target_pool': str(target_pool)}
        resp = self.rbd_client.migrate(pool_id, rbd_id, **move_rbd)
        time.sleep(60)
        # LOG.info('Rest Response: %s' % resp)
        if resp.response['status'] != '200':
            raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
        LOG.info("Migrate rbd back to old pool succeed!")

    def _check_file(self):
        pass

    def _repeat_steps(self):
        pass

    def test_rbd_migration(self):
        """
        This test basically performs following steps:
            1. Create rbd in test pool (e.g. test rbd)
            2. Write data to this rbd via ISCSI, ISCSI write data method
               Repeat step 1->11 in case2
            3. Migrate this rbd to target pool
            4. Repeat step 1->5, step7 in case2
            5. Check  testfile.txt is exists or not
            6. Migrate this rbd back to original pool
            7. Repeat step 1->5, step7 in case2
            8. Check  testfile.txt is exists or not
        """
        self._create_rbd()
        self._write_to_rbd()
        self._migrate_rbd()
        self._redo_some_step()
        self._migrate_rbd_back()
        self._redo_some_step(create_iscsi=False, need_mk=False,
                             create_data=False)

    def teardown(self):
        LOG.info('Delete the resources we created!')
        time.sleep(10)
        for iscsi_id in self.iscsi_to_delete:
            self.iscsi_client.delete_iscsitarget(iscsi_id)
            time.sleep(20)
        if self.rbd_id:
            self.rbd_client.delete_rbd(self.pool_id, self.rbd_id)
        time.sleep(30)
        if self.env['pool_tmp_id']:
            test_utils.delete_pool(self.env['pool_tmp_id'], self.params)
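The numbered procedure in the _write_to_rbd docstring corresponds roughly to the initiator-side commands below. This is only a sketch: the target address, IQN, and device name are placeholders, and it borrows utils.sshclient_execmd, the SSH helper that Example #17 uses to run commands on the initiator node.

# Sketch of the initiator-side steps from the _write_to_rbd docstring.
# target_ip, iqn and /dev/sda are placeholders, not values taken from the test.
steps = [
    'iscsiadm -m discovery -t st -p %s' % target_ip,             # discover the target
    'iscsiadm -m node -T %s -p %s --login' % (iqn, target_ip),   # log in to it
    'mkfs -t ext3 /dev/sda',                                     # format the exported LUN
    'mount /dev/sda /mnt',
    'echo "test data" > /mnt/testfile.txt',                      # write a file
    'umount /mnt',
    'iscsiadm -m node -T %s -p %s --logout' % (iqn, target_ip),  # log out
]
for cmd in steps:
    utils.sshclient_execmd(control_server_ip, control_username, control_password,
                           initiator_ip, initiator_username, initiator_password, cmd)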
Example #11
class TestRbd(test.Test):
    """
    Module for test rbd related operations.
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.cluster_client = ClustersClient(params)
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)
        self.server_client = ServersClient(params)
        self.pool_id_before = None
        self.pool_name_before = None
        self.rbd_name_before = None
        self.pool_id_after = None
        self.pool_name_after = None
        self.rbd_name_after = None
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None

        self.target_pool = None
        self.rbd_id = None
        self.server_name = None
        self.server_id = None

    def setup(self):
        ceph_server_ip = self.params.get('ceph_management_url')
        self.mid_host_ip = ceph_server_ip.split(':')[1].strip('/')
        self.cluster_id = self.params.get('cluster_id')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')
        self.ioengine = self.params.get('ioengine', 'rbd')
        self.clientname = self.params.get('clientname', 'admin')
        self.rw = self.params.get('rw', 'write')
        self.bs = self.params.get('bs', '1M')
        self.iodepth = self.params.get('iodepth', 1024)
        self.numjobs = self.params.get('numjobs', 1)
        self.direct = self.params.get('direct', 1)
        self.size = self.params.get('size', '2M')

        #self.pool_id = self.params.get('pool_id', 1)

        self.end_host_ip = test_utils.get_available_host_ip(self.params)

    def test_image_write_read(self):
        RBD_CAPACITY = 10485760

        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

        self.pool_response_before = test_utils.create_pool(self.params,
                                                           flag=True)
        self.pool_name_before = self.pool_response_before.get('name')
        self.pool_id_before = self.pool_response_before.get('id')
        self.rbd_response_before = test_utils.create_rbd_with_capacity(
            self.pool_id_before, self.params, RBD_CAPACITY)
        self.rbd_id_before = self.rbd_response_before.get('id')
        self.rbd_name_before = self.rbd_response_before.get('name')

        self.__write_rbd(self.pool_name_before,
                         self.rbd_name_before,
                         flag=True)
        self.__check_rbd_write(self.pool_id_before, self.rbd_name_before)

        self.server_name = test_utils.add_server(
            self.server_client, self.params.get('rest_arg_servername'),
            self.params.get('rest_arg_username'),
            self.params.get('rest_arg_password'),
            self.params.get('rest_arg_publicip'),
            self.params.get('rest_arg_clusterip'),
            self.params.get('rest_arg_managerip'),
            self.params.get('rest_arg_parent_bucket'))
        LOG.info("added server name is %s" % self.server_name)
        test_utils.expand_cluster(self.cluster_client, self.server_client,
                                  self.cluster_id, self.server_name)

        self.pool_response_after = test_utils.create_pool(self.params,
                                                          flag=True)
        self.pool_name_after = self.pool_response_after.get('name')
        self.pool_id_after = self.pool_response_after.get('id')
        self.rbd_response_after = test_utils.create_rbd_with_capacity(
            self.pool_id_after, self.params, RBD_CAPACITY)
        self.rbd_id_after = self.rbd_response_after.get('id')
        self.rbd_name_after = self.rbd_response_after.get('name')
        self.__write_rbd(self.pool_name_before, self.rbd_name_before)

        self.__check_rbd_write(self.pool_id_before, self.rbd_name_before)

        self.__write_rbd(self.pool_name_after, self.rbd_name_after)
        self.__check_rbd_write(self.pool_id_after, self.rbd_name_after)

    def test_resize_migrage_delaydel(self):
        # Create rbd in the pool
        capacity = 1024 * 1024 * 1
        self.pool_id = test_utils.create_pool(self.params)
        self.rbd_id = test_utils.create_rbd_with_capacity(
            self.pool_id, self.params, capacity, False)
        self._check_specified_rbd_size(self.rbd_id, capacity)

        new_name = 'cloudtest_new' + utils_misc.generate_random_string(6)
        updated_capacity = 1024 * 1024 * 2
        self._update_rbd_capacity(self.rbd_id, new_name, updated_capacity)

        self.target_pool = self._migrate_rbd(self.rbd_id)
        time.sleep(120)
        self._check_rbd_pool(self.rbd_id, self.target_pool)
        self._delay_delete_rbd(self.target_pool, self.rbd_id)
        self._check_delay_delete_rbd_list()

    def _check_specified_rbd_size(self, rbd_id, size):
        # Query a specified rbd in the pool
        resp = self.rbd_client.query_specified_rbd(self.pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("No specified rbd found in the pool")
        if int(resp['size']) != size:
            raise exceptions.TestFail("The capacity of rbd created is NOT "
                                      "expected")

    def _update_rbd_capacity(self, rbd_id, name, size):
        """
        Execute the test of updating an rbd
        """
        update_rbd = {'name': name, 'object_size': 10, 'capacity': size}
        resp = self.rbd_client.update(self.pool_id, rbd_id, **update_rbd)
        LOG.info('Rest Response: %s' % resp)
        if not resp:
            raise exceptions.TestFail("Update rbd failed")

    def _migrate_rbd(self, rbd_id):
        """
        Test migration of a specified rbd
        """
        target_pool = test_utils.create_pool(self.params)
        move_rbd = {'target_pool': str(target_pool)}
        resp = self.rbd_client.migrate(self.pool_id, rbd_id, **move_rbd)
        LOG.info('Rest Response: %s' % resp)
        if not resp:
            raise exceptions.TestFail("Migarate rbd failed")

        return target_pool

    def _check_rbd_pool(self, rbd_id, expected_pool):
        # Query a specified rbd in the expected pool
        resp = self.rbd_client.query_specified_rbd(expected_pool, rbd_id)
        LOG.info(resp)
        if not len(resp) > 0:
            raise exceptions.TestFail("No specified rbd found in the pool")
        if int(resp['pool_id']) != expected_pool:
            raise exceptions.TestFail("rbd %s is not in the expected pool" %
                                      rbd_id)

    def _delay_delete_rbd(self, pool_id, rbd_id):
        """
        Test delayed deletion of an rbd
        """
        delay_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                   time.localtime(time.time() + 60 * 60))
        resp = self.rbd_client.delay_delete_rbd(pool_id, rbd_id, delay_time)
        if not len(resp) > 0:
            raise exceptions.TestFail("Failed to set up delayed delete time")

    def _check_delay_delete_rbd_list(self):
        """
        Check the delayed-deletion rbd list
        """
        resp = self.rbd_client.delay_delete_rbd_list()
        if not len(resp) > 0:
            raise exceptions.TestFail(
                "No delay delete rbd found in the cluster")

    def __write_rbd(self, pool_name, rbd_name, flag=False):
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=%s -clientname=%s ' % (self.ioengine,
                                                       self.clientname)
        cmd3 = '-pool=%s -rw=%s -bs=%s -iodepth=%s -numjobs=%s -direct=%s ' % \
               (pool_name, self.rw, self.bs, self.iodepth,
                self.numjobs, self.direct)
        cmd4 = '-size=%s -group_reporting -rbdname=%s -name=mytest' % \
               (self.size, rbd_name)
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def __check_rbd_write(self, pool_id, rbd_name):
        status = self.__wait_for_write_rbd(pool_id, rbd_name)
        if not status:
            raise exceptions.TestFail('Failed to write rbd %s' % rbd_name)
        LOG.info('Write rbd %s successfully !' % rbd_name)

    def __wait_for_write_rbd(self, pool_id, rbd_name, timeout=60):
        def is_rbd_create():
            resp = self.rbd_client.query(pool_id)
            for i in range(len(resp)):
                if resp[i]['name'] == rbd_name \
                        and resp[i]['usedsize'] >= 0:
                    return True
            return False

        return utils_misc.wait_for(is_rbd_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for rbd %s write.' % rbd_name)

    def teardown(self):
        if self.fio_working_path is not None:
            # delete files
            cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath,
                                                  self.fio_version))
            cmd1 = 'pkill fio || true; '
            cmd2 = 'rm -rf %s %s' % \
                   (os.path.join(self.dstpath, self.fio_version),
                    os.path.join(self.dstpath, self.fio_working_path))
            cmd = cmd1 + cmd2
            remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd=cmd,
                cmd_mid=cmd_mid)

        # Delete resource for scenario case14
        if self.rbd_id is not None and self.target_pool is not None:
            try:
                test_utils.delete_rbd(self.target_pool, self.rbd_id,
                                      self.params)
            except exceptions.UnexpectedResponseCode:
                pass
        # To do: Currently, all rbd deletion is delay deletion. So, the pool
        # cannot be deleted.
        #if self.target_pool is not None:
        #test_utils.delete_pool(self.target_pool, self.params)

        LOG.info("added server name is %s" % self.server_name)
        if self.server_name is not None:
            self.server_id = test_utils.get_server_id_by_name(
                self.params, self.server_name)
            LOG.info("server id is %s" % self.server_id)
        if self.server_id is not None:
            LOG.info('Begin to sleep 60s ...')
            time.sleep(60)
            test_utils.delete_osd(self.server_id, self.params)
            test_utils.del_server(self.server_client, self.server_id)
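For reference, with the defaults set in setup() the command string that __write_rbd pieces together expands roughly as sketched below; the fio directory and the pool/rbd names are placeholders:

# Sketch of the command __write_rbd assembles with the setup() defaults
# (fio-2.1.10, mypool and myrbd are placeholder names).
fio_working_path, pool_name, rbd_name = 'fio-2.1.10', 'mypool', 'myrbd'
cmd = ('cd %s;' % fio_working_path +
       './fio -ioengine=rbd -clientname=admin ' +
       '-pool=%s -rw=write -bs=1M -iodepth=1024 -numjobs=1 -direct=1 ' % pool_name +
       '-size=2M -group_reporting -rbdname=%s -name=mytest' % rbd_name)
# With flag=True the fio tarball is unpacked first: cmd = 'tar -xzvf %s;' % fio_version + cmd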
Example #12
    def __init__(self, params, env):
        self.params = params
        self.client = RbdClient(params)
        self.body = {}
        self.env = env
Example #13
class TestRdb(test.Test):
    """
    Rdb related tests.
    """
    def __init__(self, params, env):
        self.params = params
        self.client = RbdClient(params)
        self.body = {}
        self.env = env

    def setup(self):
        """
        Set up before executing test
        """
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')

        self.pool_id = test_utils.get_pool_id(self.env, self.params)

        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v

    def test_create(self):
        """
        Execute the test of creating a rbd
        """
        rbd_name = self.params.get(
            'rbd_name', 'cloudtest_' + utils_misc.generate_random_string(6))
        create_rbd = {
            'name': rbd_name,
            'object_size': self.params.get('object_size', 0),
            'capacity': self.params.get('capacity', 200),
            'num': self.params.get('num', 1),
            'shared': self.params.get('shared', 0)
        }
        resp = self.client.create(self.pool_id, **create_rbd)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Create rbd failed: %s" % self.body)
        self.env['rbd_tmp_name'] = rbd_name
        test_utils.wait_for_rbd_create(self.client, self.pool_id, rbd_name)

    def test_query(self):
        # Query rbds in a specified pool
        resp = self.client.query(self.pool_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("No rbds found in the pool")
        for i in range(len(resp)):
            if resp[i]['name'] == self.env.get('rbd_tmp_name'):
                self.env['rbd_tmp_id'] = resp[i]['id']
                break

    def test_query_specified_rbd(self):
        # Query a specified rbd in a pool
        rbd_id = self.env.get('rbd_tmp_id')
        resp = self.client.query_specified_rbd(self.pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("No specified rbd found in the pool")

    def test_query_cluster_rbd(self):
        """
        Query all rbds of specified clusters
        """
        response = self.client.query_cluster_rbds()
        if not len(response) > 0:
            raise exceptions.TestFail("No rbds found in cluster:%s" %
                                      self.cluster_id)

    def test_update(self):
        """
        Execute the test of updating a rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        # rbd_id = 11
        rbd_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        update_rbd = {
            'name': rbd_name,
            'object_size': self.params.get('rest_arg_object_size', 1),
            'capacity': self.params.get('rest_arg_capacity', 200)
        }
        resp = self.client.update(self.pool_id, rbd_id, **update_rbd)
        LOG.info('Rest Response: %s' % resp)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Update rbd failed: %s" % self.body)
        else:
            self.env['rbd_tmp_name'] = rbd_name

    def test_delete(self):
        """
        Test deletion of a specified rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        LOG.info("Try to delete rbd with ID: %d" % rbd_id)
        if self.env.get('pool_target_id') is not None:
            self.pool_id = self.env.get('pool_target_id')
        time.sleep(120)
        # delete the rbd created in the right pool
        resp = self.client.delete_rbd(self.pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Delete rbd failed")
        # FIXME: rbd deletion is now asynchronous, so the check below is disabled
        '''resp = self.client.query(self.pool_id)
        for i in range(len(resp)):
            if resp[i]['id'] == rbd_id:
                raise exceptions.TestFail("Delete rbd failed")'''

    def test_delay_delete(self):
        """
        Test delayed deletion of an rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        LOG.info("Try to delay delete rbd with ID: %s" % rbd_id)

        delay_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                   time.localtime(time.time() + 60 * 60))
        delay_time = self.params.get('rest_arg_delayed_time', delay_time)
        LOG.info("Delay time is %s" % delay_time)
        resp = self.client.delay_delete_rbd(self.pool_id, rbd_id, delay_time)

        if not len(resp) > 0:
            raise exceptions.TestFail("Failed to set up delayed delete time")

    def test_delay_delete_rbd_list(self):
        """
        Test listing rbds scheduled for delayed deletion
        """
        resp = self.client.delay_delete_rbd_list()
        if not len(resp) > 0:
            raise exceptions.TestFail(
                "No delay delete rbd found in the cluster")

    def test_cancel_delay_delete(self):
        """
        Test cancelling delayed deletion of an rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        LOG.info("Try to cancel delay delete for rbd %d" % rbd_id)
        self.client.cancel_delay_delete_rbd(self.pool_id, rbd_id)
        resp = self.client.delay_delete_rbd_list()
        for i in range(len(resp)):
            if resp[i]['id'] == rbd_id:
                raise exceptions.TestFail("Cancel delay delete rbd failed")

    def test_copy(self):
        """
        Test copy rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        copy_rbd = {'target_pool': self.pool_id}
        resp = self.client.copy_rbd(self.pool_id, rbd_id, **copy_rbd)
        LOG.info('Rest Response: %s' % resp)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Copy rbd failed: %s" % self.body)
        #self.env['copy_pool_target_id'] = target_pool

    def test_migrate(self):
        """
        Test migration of a specified rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        vgroup_id = self.env.get('vgroup_id')
        target_pool = test_utils.create_pool(self.params, vgroup_id=vgroup_id)
        time.sleep(60)
        move_rbd = {'target_pool': str(target_pool)}
        resp = self.client.migrate(self.pool_id, rbd_id, **move_rbd)
        LOG.info('Rest Response: %s' % resp)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Migarate rbd failed: %s" % self.body)
        self.env['pool_target_id'] = target_pool

    def test_complete_delete(self):
        """
        Test complete deletion of a specified rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        if self.env.get('pool_target_id') is not None:
            self.pool_id = self.env.get('pool_target_id')
        resp = self.client.delete_rbd(self.pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Delete rbd failed")

        resp = self.client.recycled_delete_rbd_list()
        find = False
        for i in range(len(resp)):
            if resp[i]['name'] == self.env.get('rbd_tmp_name'):
                find = True
                break
        if not find:
            raise exceptions.TestFail(
                "There isn't deleted rbd in recycle bin.")

        LOG.info("Try to completely delete rbd with ID: %d" % rbd_id)

        # completely delete the rbd created in the right pool

        resp = self.client.complete_delete_rbd(self.pool_id, rbd_id)

        # Bug: API completely delete cannot delete rbds in recycle bin.
        # Workaround: Execute API completely delete once,
        # then execute list recycle rbds twice
        time.sleep(120)

        resp = self.client.recycled_delete_rbd_list()
        resp = self.client.recycled_delete_rbd_list()
        for i in range(len(resp)):
            if resp[i]['name'] == self.env.get('rbd_tmp_name'):
                raise exceptions.TestFail(
                    "Failed to completely delete rbd %s" %
                    self.env.get('rbd_tmp_name'))

    def test_recycled_rbd_list(self):
        """
        Test the rbd list in recycle bin
        """
        resp = self.client.recycled_delete_rbd_list()
        if not len(resp) > 0:
            raise exceptions.TestFail("No delete rbd found in the recycle bin")

    def test_cancel_rdb_deletion(self):
        """
        Test cancelling deletion of an rbd
        """
        rbd_id = self.env.get('rbd_tmp_id')
        LOG.info("Try to cancel rbd %d deletion" % rbd_id)
        if self.env.get('pool_target_id') is not None:
            self.pool_id = self.env.get('pool_target_id')
        self.client.cancel_delete_rbd(self.pool_id, rbd_id)
        resp = self.client.recycled_delete_rbd_list()
        for i in range(len(resp)):
            if resp[i]['id'] == rbd_id:
                raise exceptions.TestFail("Cancel delete rbd failed")

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        pass
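The rest_arg_ loop in setup() above strips the prefix from matching configuration keys and uses the remainder as the request body. A small sketch of that mapping, with hypothetical parameter values:

# Sketch of the rest_arg_ mapping done in setup() (hypothetical values).
params = {'rest_arg_name': 'cloudtest_rbd', 'rest_arg_capacity': 200, 'cluster_id': 1}
body = {}
for k, v in params.items():
    if 'rest_arg_' in k:
        new_key = k.split('rest_arg_')[1]
        body[new_key] = v
# body is now {'name': 'cloudtest_rbd', 'capacity': 200}; 'cluster_id' is left untouched.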
Example #14
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)
        self.snapshot_client = SnapshotsClient(params)
Example #15
class TestRbdClone(test.Test):
    """
    Module for test rbd clone related operations.
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)
        self.snapshot_client = SnapshotsClient(params)

    def setup(self):
        self.cluster_id = self.params.get('cluster_id')

    def test(self):
        self.pool_id = test_utils.create_pool(self.params)
        self.rbd_response = test_utils.create_rbd_with_capacity(
            self.pool_id, self.params, RBD_CAPACITY)
        self.rbd_id = self.rbd_response.get('id')
        self.rbd_name = self.rbd_response.get('name')

        self.__create_snapshot()
        self.__update_rbd(self.rbd_name)
        self.snapshot_id = self.__create_snapshot()
        self.__query_snapshot()
        self.__clone_snapshot()

    def __query_snapshot(self):
        resp = self.snapshot_client.query()
        LOG.info('response is: %s' % resp)
        body = resp.body
        count = 0
        LOG.info('response body is: %s' % body)
        # the response body has an 'items' list
        for i in range(len(body['items'])):
            # the key is 'rbdId', not 'rbd_id'
            if body['items'][i]['rbdId'] == self.rbd_id:
                count = count + 1
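        # two snapshots are created in test(): one before and one after the rbd update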
        if count != 2:
            raise exceptions.TestFail('Snapshot count %s is wrong !' % count)

    def __clone_snapshot(self):
        body = {}
        rbd_name = 'rbd_clone' + utils_misc.generate_random_string(6)
        body['standalone'] = self.params.get('standalone', 'true')
        body['dest_pool'] = self.params.get('dest_pool', '')
        body['dest_rbd'] = self.params.get('dest_rbd', rbd_name)
        self.snapshot_client.clone(self.snapshot_id, **body)
        status = self.__check_rbd_capacity(body.get('dest_rbd'))
        if not status:
            raise exceptions.TestFail('Clone snapshot failed because capacity'
                                      ' is wrong after clone!')

    def __check_rbd_capacity(self, rbd_name, timeout=100):
        def is_capacity_right():
            resp = self.rbd_client.query(self.pool_id)
            for i in range(len(resp)):
                if resp[i].get('name') == rbd_name:
                    if resp[i].get('capacity') == CAPACITY_MODIFY:
                        return True
            return False
        return utils_misc.wait_for(is_capacity_right,
                                   timeout=timeout, first=0, step=5,
                                   text='Waiting for rbd update!')

    def __update_rbd(self, rbd_name):
        body = {}
        LOG.info('body name is %s' % rbd_name)
        body['name'] = rbd_name
        body['object_size'] = 1
        body['capacity'] = CAPACITY_MODIFY
        self.rbd_client.update(self.pool_id, self.rbd_id, **body)
        status = self.__check_rbd_capacity(body['name'])
        if not status:
            raise exceptions.TestFail('Update rbd capacity failed !')

    def __create_snapshot(self):
        body = {}
        body['cluster_id'] = self.cluster_id
        body['pool_id'] = self.pool_id
        body['rbd_id'] = self.rbd_id
        body['snapshot_name'] = 'cloudtest_snapshot' + \
                                utils_misc.generate_random_string(6)
        resp = self.snapshot_client.create(**body)
        resp = resp.body
        if resp.get('success') is False:
            raise exceptions.TestFail("Create snapshot failed: %s" % body)

        return resp.get('results')['id']

    def teardown(self):
        pass
Example #16
class TestSnapshot(test.Test):
    """
    Module for testing snapshot related operations.
    """
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.rbd_client = RbdClient(params)
        self.snapshot_client = SnapshotsClient(params)
        self.iscsi_client = ISCSIClient(params)

        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password')
        self.initiator_username = self.params.get('ceph_node_ssh_username')
        self.initiator_password = self.params.get('ceph_node_ssh_password')

        self.rbd_id = None
        self.snapshot_id = None
        self.target_id = None
        self.lun_id = None

    def setup(self):

        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')

        if self.params.get('pool_id'):
            self.pool_id = self.params.get('pool_id')
        else:
            self.pool_id = test_utils.create_pool(self.params)
            LOG.info("pool_id is %s" % self.pool_id)

        if self.params.get('initiator_ip'):
            self.initiator_ip = self.params.get('initiator_ip')
        else:
            self.initiator_ip = test_utils.get_available_host_ip(self.params)

    def _create_snapshot(self, rbd_id):
        self.body['cluster_id'] = self.cluster_id
        self.body['pool_id'] = self.pool_id
        self.body['rbd_id'] = rbd_id
        self.body['snapshot_name'] = 'cloudtest_snapshot' + \
                                     utils.utils_misc.generate_random_string(6)
        resp = self.snapshot_client.create(**self.body)
        resp = resp.body
        if resp.get('success') is False:
            raise exceptions.TestFail("Create snapshot failed: %s" % self.body)

        return resp.get('results')['id']

    def _snapshot_rollback(self, rbd_id, snapshot_id):
        self.body['to_snapshot'] = snapshot_id
        self.body['rbd_id'] = rbd_id
        resp = self.snapshot_client.rollback(**self.body)
        resp = resp.body
        if resp.get('success') is not True:
            raise exceptions.TestFail("Rollback snapshot failed: %s" %
                                      self.body)

    def _create_iscsi_target(self):
        self.iscsi_target_name = "cloudtest" + \
                                 utils.utils_misc.generate_random_string(6)
        body = {
            'initiator_ips': self.initiator_ip,
            'target_name': self.iscsi_target_name,
            'multipath': self.params.get('multipath', '3')
        }

        resp = self.iscsi_client.create(**body)
        if not resp and utils.verify_response(body, resp):
            raise exceptions.TestFail("Create target failed: %s" % body)

        return resp.body['target_id']

    def _create_iscsi_lun(self, target_id, rbd_id):
        body = {
            'target_id': target_id,
            'pool_id': self.pool_id,
            'rbd_id': rbd_id
        }
        resp = self.iscsi_client.add_lun(**body)

        return resp.body['lun_id']

    def _delete_iscsi_lun(self, target_id, lun_id):
        body = {'target_id': target_id, 'lun_id': lun_id}

        resp = self.iscsi_client.delete_lun(**body)

    def _delete_target(self, target_id):
        """
        Test deletion of the specified target
        """
        self.iscsi_client.delete_iscsitarget(target_id)
        resp = self.iscsi_client.query()
        for i in range(len(resp)):
            if resp[i]['target_id'] == target_id:
                raise exceptions.TestFail("Delete target failed")

    def _delete_snapshot(self, snapshot_id):
        """
        Test deletion of the specified snapshot.
        """
        resp = self.snapshot_client.delete_snapshot(snapshot_id)
        resp = resp.body
        if resp.get('success') is not True:
            raise exceptions.TestFail("Delete snapshot failed!")

    def _delete_rbd(self, pool_id, rbd_id):
        """
        Test deletion of a specified rbd
        """
        # delete the rbd created in the right pool
        resp = self.rbd_client.delete_rbd(pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Delete rbd failed")

    def test(self):
        # Create rbd in the pool
        self.rbd_id = test_utils.create_rbd(self.pool_id, self.params)

        # Create iscsi
        self.target_id = self._create_iscsi_target()
        # Bind iscsi to rbd
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)

        mount_point = self.params.get('mount_point', '/mnt')
        file_name = self.params.get('file_name', 'testfile.txt')
        self.target_ip = test_utils.get_specified_targetip(
            self.params, self.target_id, 0)
        need_mk = True
        create_data = True
        find = test_utils.operate_iscsi(
            self.control_server_ip, self.control_username,
            self.control_password, self.initiator_ip, self.initiator_username,
            self.initiator_password, self.iscsi_target_name, self.target_ip,
            mount_point, file_name, need_mk, create_data)
        if find:
            LOG.info("Find %s under %s!" % (file_name, mount_point))
        else:
            LOG.error("%s not found under %s" % (file_name, mount_point))
        # Unbind iscsi from the rbd
        self._delete_iscsi_lun(self.target_id, self.lun_id)

        time.sleep(30)
        # Create snapshot with the rbd
        self.snapshot_id = self._create_snapshot(self.rbd_id)
        need_mk = False
        create_data = True
        file_name_2 = self.params.get('file_name_2', 'testfile2.txt')
        find = test_utils.operate_iscsi(
            self.control_server_ip, self.control_username,
            self.control_password, self.initiator_ip, self.initiator_username,
            self.initiator_password, self.iscsi_target_name, self.target_ip,
            mount_point, file_name_2, need_mk, create_data)
        if find:
            LOG.info("Find %s under %s!" % (file_name_2, mount_point))
        else:
            LOG.error("%s not found under %s" % (file_name_2, mount_point))

        time.sleep(30)
        # Roll back the snapshot on this rbd
        self._snapshot_rollback(self.rbd_id, self.snapshot_id)

        # Bind iscsi to the rbd
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)

        time.sleep(30)
        need_mk = False
        create_data = False
        find = test_utils.operate_iscsi(
            self.control_server_ip, self.control_username,
            self.control_password, self.initiator_ip, self.initiator_username,
            self.initiator_password, self.iscsi_target_name, self.target_ip,
            mount_point, file_name_2, need_mk, create_data)
        if find:
            LOG.error("Find %s under %s!" % (file_name_2, mount_point))
        else:
            LOG.info("%s not found under %s is expected!" %
                     (file_name_2, mount_point))

    def teardown(self):
        if self.lun_id is not None:
            self._delete_iscsi_lun(self.target_id, self.lun_id)
        if self.target_id is not None:
            self._delete_target(self.target_id)
        if self.snapshot_id is not None:
            self._delete_snapshot(self.snapshot_id)
        if self.rbd_id is not None:
            try:
                self._delete_rbd(self.pool_id, self.rbd_id)
            except exceptions.UnexpectedResponseCode:
                pass
Example #17
class TestISCSIMulpath(test.Test):
    """
    Module for testing ISCSI Multipath related operations.
    """
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.rbd_client = None
        self.iscsi_client = None
        self.pool_client = None

        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password')
        self.initiator_ip = self.params.get('ceph_node_ip')
        self.initiator_username = self.params.get('ceph_node_ssh_username')
        self.initiator_password = self.params.get('ceph_node_ssh_password')
        self.target_ip = self.params.get('ceph_node_ip')

        self.dirtypoint = "This is an example to check multipath"
        self.mulpath_mountpoint = "/mnt/multipath"
        self.mulpath_filename = "example.txt"
        self.rbd_name = None
        self.rbd_id = None
        self.iscsi_target_id = None
        self.iscsi_target_name = None
        self.iscsi_target_hostip = []
        self.lun_id = None
        self.pool_name = None
        self.pool_id = None
        self.cluster_id = None

    def setup(self):
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')
        else:
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) > 0:
                self.cluster_id = clusters[0]['id']
        self.params['cluster_id'] = self.cluster_id
        self.pool_client = PoolsClient(self.params)

        if 'pool_name' in self.env:
            self.pool_name = self.env['pool_name']
        else:
            self.pool_name = self.params.get('pool_name', 'rbd')
        self.params['pool_name'] = self.pool_name
        if self.pool_name is not None:
            resp = self.pool_client.query()
            for i in range(len(resp)):
                if resp[i]['name'] == self.pool_name:
                    self.pool_id = resp[i]['id']
        else:
            self.pool_id = test_utils.create_pool(self.params)
            LOG.info("Created pool that id is %s" % self.pool_id)
        self.params['pool_id'] = self.pool_id

        self.rbd_client = RbdClient(self.params)
        self.iscsi_client = ISCSIClient(self.params)

    def _create_iscsi_target(self):
        self.iscsi_target_name = "cloudtest" + \
                                 utils.utils_misc.generate_random_string(6)
        body = {
            'initiator_ips': self.initiator_ip,
            'target_name': self.iscsi_target_name,
            'multipath': self.params.get('multipath', '3')
        }
        resp = self.iscsi_client.create(**body)
        if not resp and utils.verify_response(body, resp):
            raise exceptions.TestFail("Create target failed: %s" % body)
        self.iscsi_target_hostip = resp['host_ip'].split(',')

        return resp.body['target_id']

    def _create_iscsi_lun(self, target_id, rbd_id):
        body = {
            'target_id': target_id,
            'pool_id': self.pool_id,
            'rbd_id': rbd_id
        }
        resp = self.iscsi_client.add_lun(**body)

        return resp.body['lun_id']

    def _delete_iscsi_lun(self, target_id, lun_id):
        body = {'target_id': target_id, 'lun_id': lun_id}
        self.iscsi_client.delete_lun(**body)

    def _delete_target(self, target_id):
        """
        Delete the specified iSCSI target and verify it no longer exists
        """
        self.iscsi_client.delete_iscsitarget(target_id)
        resp = self.iscsi_client.query()
        for i in range(len(resp)):
            if resp[i]['target_id'] == target_id:
                raise exceptions.TestFail("Delete target failed")

    def _delete_rbd(self, pool_id, rbd_id):
        """
        Delete the specified rbd from the given pool
        """
        # delete the rbd created in the specified pool
        resp = self.rbd_client.delete_rbd(pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Delete rbd failed")

    def get_rbd_id(self, pool_id, rbd_name):
        """
        Query the rbd with the given name in the specified pool and return
        its id, or None if it is not found.
        """
        resp = self.rbd_client.query(pool_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("No specified rbd found in the pool")
        for i in range(len(resp)):
            if resp[i]['name'] == rbd_name:
                return resp[i]['id']
        return None

    def get_rbd_name(self, pool_id, rbd_id):
        """
        Query the rbd with the given id in the specified pool and return
        its name, or None if it is not found.
        """
        resp = self.rbd_client.query(pool_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("No specified rbd found in the pool")
        for i in range(len(resp)):
            if resp[i]['id'] == rbd_id:
                return resp[i]['name']
        return None

    def hit_target(self, control_server_ip, control_username, control_password,
                   initiator_ip, initiator_username, initiator_password):
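        # Run sendtargets discovery against every portal exported by the
        # target and verify the created target name shows up on each of them.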
        for i in range(len(self.iscsi_target_hostip)):
            cmd = ('iscsiadm -m discovery -t st -p %s; ' %
                   self.iscsi_target_hostip[i])
            find, buff = utils.sshclient_execmd(control_server_ip,
                                                control_username,
                                                control_password, initiator_ip,
                                                initiator_username,
                                                initiator_password, cmd)
            if buff.find(self.iscsi_target_name) == -1:
                raise exceptions.TestFail("No specified target found for %s" %
                                          self.iscsi_target_hostip[i])

    def do_iscsi_login(self, control_server_ip, control_username,
                       control_password, initiator_ip, initiator_username,
                       initiator_password, target_ip):
        cmd = ('iscsiadm -m node -T %s -p %s --login; ' %
               (self.iscsi_target_name, target_ip))
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

    def do_iscsi_logout(self, control_server_ip, control_username,
                        control_password, initiator_ip, initiator_username,
                        initiator_password, target_ip):
        cmd = ('iscsiadm -m node -T %s -p %s --logout; ' %
               (self.iscsi_target_name, target_ip))
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

    def get_iscsi_count(self, control_server_ip, control_username,
                        control_password, initiator_ip, initiator_username,
                        initiator_password):
        # Count SCSI block devices on the initiator; only the change between
        # two calls matters (the wc output also counts the lsblk header line).
        cmd = 'lsblk -S | wc -l; '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        return int(_lines[1], 10)

    def get_iscsi_multipath(self, control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password):
        cmd = 'multipath -l; '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        return find, buff

    def get_chars(self, text):
        # Keep only letters, '/' and spaces; strips digits, punctuation and
        # terminal noise from the remote command output.
        _str = ""
        for i in text:
            if (('a' <= i <= 'z') or i == '/' or i == ' '
                    or ('A' <= i <= 'Z')):
                _str += i
        return _str

    def make_iscsi_dirty(self, control_server_ip, control_username,
                         control_password, initiator_ip, initiator_username,
                         initiator_password):
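        # Locate the multipath mapper device, format and mount it, then write
        # a marker file whose content is verified later after one path is
        # taken down.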
        cmd = 'ls --color=never /dev/mapper/mpath*'
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        if len(_lines) < 2:
            raise exceptions.TestFail("Did not get any mapper device")
        mapper_device = self.get_chars(_lines[1])
        if len(mapper_device) == 0:
            raise exceptions.TestFail("Did not get a valid mapper device name")

        # make sure the mount point for the multipath device exists
        cmd = 'mkdir -p %s' % self.mulpath_mountpoint
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = 'mkfs.ext4 %s' % mapper_device
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = 'mount %s %s' % (mapper_device, self.mulpath_mountpoint)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = 'echo "%s" > %s/%s' % (self.dirtypoint, self.mulpath_mountpoint,
                                     self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = 'cat %s/%s' % (self.mulpath_mountpoint, self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

    def start_iscsi_tgt(self, control_server_ip, control_username,
                        control_password, initiator_ip, initiator_username,
                        initiator_password):
        cmd = 'service tgtd start'
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

    def stop_iscsi_tgt(self, control_server_ip, control_username,
                       control_password, initiator_ip, initiator_username,
                       initiator_password):
        cmd = 'service tgtd stop'
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

    def check_iscsi_dirty(self, control_server_ip, control_username,
                          control_password, initiator_ip, initiator_username,
                          initiator_password):
        cmd = 'cat %s/%s' % (self.mulpath_mountpoint, self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        if len(_lines) < 2:
            raise exceptions.TestFail("Did not get info for validation")
        info_val = self.get_chars(_lines[1])
        if self.dirtypoint == info_val:
            LOG.info("Found %s under %s with the expected content" %
                     (self.mulpath_filename, self.mulpath_mountpoint))
        else:
            raise exceptions.TestFail(
                "Expected content of %s not found under %s" %
                (self.mulpath_filename, self.mulpath_mountpoint))

        cmd = 'rm -rf %s/%s' % (self.mulpath_mountpoint, self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        cmd = '[ -f %s/%s ] || echo removed' % (self.mulpath_mountpoint,
                                                self.mulpath_filename)
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        _lines = buff.split('\n')
        info_val = self.get_chars(_lines[1])
        if info_val == "removed":
            LOG.info("Removed %s successfully!" % self.mulpath_filename)
        else:
            raise exceptions.TestFail("Removed %s fault!" %
                                      self.mulpath_filename)

    def clean_iscsi_dirty(self, control_server_ip, control_username,
                          control_password, initiator_ip, initiator_username,
                          initiator_password):
        cmd = 'umount %s' % self.mulpath_mountpoint
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

    def iscsi_actions(self, control_server_ip, control_username,
                      control_password, initiator_ip, initiator_username,
                      initiator_password, target_ip):
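        # Overall flow: install the initiator tools, configure multipathd for
        # failover, discover and log in to both target portals, write a marker
        # file through the multipath device, stop one tgt daemon, verify the
        # marker is still readable over the surviving path, then restore the
        # daemon, clean up and log out.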

        cmd = 'yum -y install iscsi-initiator-utils ; '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        if not find:
            raise exceptions.TestFail(
                "Failed to install iscsi-initiator-utils")

        cmd = 'yum -y install device-mapper-multipath ; '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        if not find:
            raise exceptions.TestFail(
                "Failed to install device-mapper-multipath")

        multipathconf="""defaults{\n    user_friendly_names yes\n""" \
        """    polling_interval 10\n    checker_timeout 120\n    """ \
        """queue_without_daemon no\n}\nblacklist {\n""" \
        """    devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"\n""" \
        """    devnode "^hd[a-z]"\n}\ndevices {\n    device{\n        """ \
        """path_grouping_policy failover\n    }\n}"""
        cmd = 'echo \'%s\' > /etc/multipath.conf' % multipathconf
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)

        cmd = 'systemctl start multipathd '
        find, buff = utils.sshclient_execmd(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password, cmd)
        if not find:
            raise exceptions.TestFail("Start multipath service fault")

        self.hit_target(control_server_ip, control_username, control_password,
                        initiator_ip, initiator_username, initiator_password)

        iscsi_count1 = self.get_iscsi_count(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password)
        #Login iscsi
        self.do_iscsi_login(control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password, self.iscsi_target_hostip[0])
        time.sleep(1)
        iscsi_count2 = self.get_iscsi_count(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password)
        #Check lsblk
        if iscsi_count2 <= iscsi_count1:
            raise exceptions.TestFail("Login target to be first iscsi fault")

        self.do_iscsi_login(control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password, self.iscsi_target_hostip[1])
        time.sleep(1)
        iscsi_count3 = self.get_iscsi_count(control_server_ip,
                                            control_username, control_password,
                                            initiator_ip, initiator_username,
                                            initiator_password)
        #Check lsblk
        if iscsi_count3 <= iscsi_count2:
            raise exceptions.TestFail("Login target to be second iscsi fault")

        #Get Multipath
        find, buff = self.get_iscsi_multipath(control_server_ip,
                                              control_username,
                                              control_password, initiator_ip,
                                              initiator_username,
                                              initiator_password)
        #Check Multipath
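        # A minimal sanity check (assumption: with multipathd running and both
        # portals logged in, 'multipath -l' lists a user_friendly mpath alias).
        if buff.find('mpath') == -1:
            LOG.error("No mpath device found in 'multipath -l' output: %s" %
                      buff)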

        #make iscsi dirty
        self.make_iscsi_dirty(control_server_ip, control_username,
                              control_password, initiator_ip,
                              initiator_username, initiator_password)
        time.sleep(1)
        #Stop one tgt
        self.stop_iscsi_tgt(control_server_ip, control_username,
                            control_password, initiator_ip, initiator_username,
                            initiator_password)
        time.sleep(1)

        #Check iscsi dirty
        self.check_iscsi_dirty(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password)
        time.sleep(1)

        #Start one tgt
        self.start_iscsi_tgt(control_server_ip, control_username,
                             control_password, initiator_ip,
                             initiator_username, initiator_password)
        time.sleep(1)

        #Clean iscsi dirty
        self.clean_iscsi_dirty(control_server_ip, control_username,
                               control_password, initiator_ip,
                               initiator_username, initiator_password)

        #Logout iscsi
        self.do_iscsi_logout(control_server_ip, control_username,
                             control_password, initiator_ip,
                             initiator_username, initiator_password,
                             self.iscsi_target_hostip[1])
        time.sleep(1)
        self.do_iscsi_logout(control_server_ip, control_username,
                             control_password, initiator_ip,
                             initiator_username, initiator_password,
                             self.iscsi_target_hostip[0])
        time.sleep(1)

    def test(self):
        # Create rbd in the pool
        self.rbd_id = test_utils.create_rbd(self.pool_id, self.params)
        if self.rbd_id is None:
            raise exceptions.TestFail("Failed to create rbd in the pool")
        else:
            LOG.info("RBD id is %s" % self.rbd_id)
            # Create iscsi
            self.iscsi_target_id = self._create_iscsi_target()
            time.sleep(1)
            target_multipath = len(self.iscsi_target_hostip)
            if target_multipath <= 2:
                raise exceptions.TestFail("Multipath is %d" % target_multipath)
            # Bind iscsi to rbd
            self.lun_id = self._create_iscsi_lun(self.iscsi_target_id,
                                                 self.rbd_id)
            time.sleep(1)
            self.iscsi_actions(self.control_server_ip, self.control_username,
                               self.control_password, self.initiator_ip,
                               self.initiator_username,
                               self.initiator_password, self.target_ip)

    def teardown(self):
        if self.lun_id is not None:
            self._delete_iscsi_lun(self.iscsi_target_id, self.lun_id)
        if self.iscsi_target_id is not None:
            self._delete_target(self.iscsi_target_id)
        if self.rbd_id is not None:
            self._delete_rbd(self.pool_id, self.rbd_id)
Exemplo n.º 18
0
def delete_rbd(pool_id, rbd_id, params):
    LOG.info("Try to delete rbd: %s" % rbd_id)
    rbd_client = RbdClient(params)
    return rbd_client.delete_rbd(pool_id, rbd_id)
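A minimal usage sketch for the helper above; the pool id and surrounding setup
are placeholders for illustration and would normally come from the test
configuration and test_utils:

def rbd_create_delete_example(pool_id, params):
    # create a throwaway rbd, then remove it again with the helper above
    rbd_id = test_utils.create_rbd(pool_id, params)
    resp = delete_rbd(pool_id, rbd_id, params)
    if not len(resp) > 0:
        LOG.error("Deleting rbd %s returned an empty response" % rbd_id)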
Exemplo n.º 19
0
class TestGroup(test.Test):
    """
    Test group can separate the data io from customized domain
    """
    def __init__(self, params, env):
        self.params = params
        self.clusters_client = ClustersClient(params)
        self.body = {}
        self.env = env
        self.cluster_id = None
        self.host_group_name = 'host_group_' \
                               + utils_misc.generate_random_string(6)
        self.host_group_id = None
        self.host_group_pool_id = None
        self.host_group_pool_name = None
        self.host_group_rbd_id = None
        self.host_group_rbd_name = None
        self.host_group_servers_id = []
        self.rack_group_name = 'rack_group_' \
                               + utils_misc.generate_random_string(6)
        self.rack_group_id = None
        self.rack_group_pool_id = None
        self.rack_group_pool_name = None
        self.rack_group_rbd_id = None
        self.rack_group_rbd_name = None
        self.rack_group_servers_id = []
        self.dstpath = '/root'
        self.workload_path = data_dir.CEPH_API_SCENARIOS_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        self.mid_host_ip = \
            self.params.get('ceph_management_url').split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')
        self.rw = self.params.get('rw', 'randrw')
        self.bs = self.params.get('bs', '8k')
        self.iodepth = self.params.get('iodepth', 128)
        self.runtime = self.params.get('runtime', 120)
        self.rwmixread = self.params.get('rwmixread', 70)
        self.end_host_ip = None

    def setup(self):
        """
        Set up before executing test
        """
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')
        LOG.info("Created cluster successfully!")
        self.params['cluster_id'] = self.cluster_id
        self.servers_client = ServersClient(self.params)
        self.group_client = GroupsClient(self.params)
        self.pool_client = PoolsClient(self.params)
        self.rbd_client = RbdClient(self.params)
        self.osd_client = OsdClient(self.params)

    def _copy_fio_package_to_host(self):
        self.end_host_ip = test_utils.get_available_host_ip(self.params)
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

    def _write_rbd(self, pool_name, rbd_name, flag=False):
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin -pool=%s ' % \
               pool_name
        cmd3 = '-rw=%s -rwmixread=%s -bs=%s -iodepth=%s -numjobs=1 -direct=1 ' % \
               (self.rw, self.rwmixread, self.bs, self.iodepth)
        cmd4 = '-runtime=%s -group_reporting -rbdname=%s -name=mytest' % \
               (self.runtime, rbd_name)
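        # With the default params the assembled command is roughly:
        #   cd <fio_dir>; ./fio -ioengine=rbd -clientname=admin -pool=<pool>
        #   -rw=randrw -rwmixread=70 -bs=8k -iodepth=128 -numjobs=1 -direct=1
        #   -runtime=120 -group_reporting -rbdname=<rbd> -name=mytest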
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        LOG.info("cmd = %s" % cmd)

        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def _create_group(self, name, leaf_firstn):
        group_body = {'name': name, 'max_size': 10, 'leaf_firstn': leaf_firstn}
        resp_body = self.group_client.create_group(**group_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create group policy failed")
        LOG.info("Created group '%s' with id: %s" % (body['name'], body['id']))
        return body['id']

    def _create_bucket(self, group_id):
        create_body = {
            'name': 'cloudtest_bucket_' + utils_misc.generate_random_string(6),
            'type': 'rack'
        }
        resp_body = self.group_client.create_bucket(group_id, **create_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create bucket failed")
        LOG.info("Created bucket '%s' with id: %s" %
                 (body['name'], body['id']))
        return body['id']

    def _create_server(self, request_body):
        if not request_body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 600)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s" %
                                      request_body['servername'])
        LOG.info('Created server %s successfully!' %
                 body['properties'].get('name'))

    def _add_three_hosts(self, kwargs):
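        # Collect per-host settings from params using the given key prefix,
        # e.g. '<prefix>servername_1', '<prefix>publicip_1',
        # '<prefix>clusterip_1' and '<prefix>parent_bucket_1' for each host,
        # then add all hosts concurrently in separate threads.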
        body = {}
        for k, v in self.params.items():
            if kwargs in k:
                new_key = k.split(kwargs)[1]
                body[new_key] = v
        LOG.info("body = %s" % body)
        i = 1
        threads = []
        while body.get('servername_%d' % i):
            tmp = 'servername_%d' % i
            servername = body.get(tmp, 'cloudtest_server_%d' % i)
            tmp = 'username_%d' % i
            username = body.get(tmp, 'root')
            tmp = 'password_%d' % i
            password = body.get(tmp, 'lenovo')
            tmp = 'publicip_%d' % i
            publicip = body.get(tmp)
            tmp = 'clusterip_%d' % i
            clusterip = body.get(tmp)
            tmp = 'parent_bucket_%d' % i
            parent_bucket = body.get(tmp)
            create_server_body = {
                'servername': servername,
                'username': username,
                'passwd': password,
                'publicip': publicip,
                'clusterip': clusterip,
                'parent_bucket': parent_bucket
            }
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            i = i + 1

        # waiting for all servers ready
        for t in threads:
            t.setDaemon(True)
            t.start()

        for i in range(0, len(threads)):
            try:
                threads[i].join(600)
            except Exception as details:
                LOG.exception(
                    'Caught exception waiting for server %d added : %s' %
                    (i, details))

    def _deploy_cluster(self):
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(
            self.cluster_id, self.clusters_client, 'deployed',
            int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _create_pool(self, group_id):
        pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        LOG.info("Try to create pool %s" % pool_name)
        create_pool = {
            'name': pool_name,
            'size': self.params.get('pool_size', 3),
            'group_id': group_id,
            'pg_num': self.params.get('pg_num', 128)
        }
        resp = self.pool_client.create(**create_pool)
        status = self._wait_for_pool_create(pool_name)
        if not status:
            raise exceptions.TestFail('Failed to create pool %s' % pool_name)
        LOG.info('Created pool %s successfully!' % pool_name)
        pool_id = resp.body['properties']['context']['pool_id']
        return pool_id, pool_name

    def _wait_for_pool_create(self, pool_name, timeout=1000):
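        # Poll until the pool shows up in state 1 with the size and pg_num
        # requested by _create_pool (defaults: size 3, pg_num 128).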
        def is_pool_create():
            resp = self.pool_client.query()
            for i in range(len(resp)):
                if resp[i]['name'] == pool_name \
                        and resp[i]['state'] == 1 \
                        and resp[i]['size'] == 3 \
                        and resp[i]['pg_num'] == 128:
                    return True
            return False

        return utils_misc.wait_for(is_pool_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for pool %s create.' %
                                   pool_name)

    def _create_rbd(self, pool_id, rbd_name):
        LOG.info("Try to create rbd %s" % rbd_name)
        create_rbd = {
            'name': rbd_name,
            'object_size': self.params.get('object_size', 10),
            'capacity': self.params.get('capacity', 1024 * 1024 * 1024)
        }
        self.rbd_client.create(pool_id, **create_rbd)
        status = self._wait_for_rbd_in_status(pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to create rbd %s!' % rbd_name)
        resp = self.rbd_client.query(pool_id)
        for i in range(len(resp)):
            if resp[i]['name'] == rbd_name:
                return resp[i]['id']
        raise exceptions.TestError('Create rbd %s failed' % rbd_name)

    def _wait_for_rbd_in_status(self, pool_id, rbd_name, status, timeout=300):
        status_map = {'copying': 6, 'ready': 0}

        def is_rbd_create():
            resp = self.rbd_client.query(pool_id)
            for i in range(len(resp)):
                if resp[i]['name'] == rbd_name:
                    if resp[i]['status'] == status_map[status]:
                        return True
            return False

        return utils_misc.wait_for(is_rbd_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for rbd %s create.' %
                                   rbd_name)

    def _migrate_rbd(self, src_pool_id, des_pool_id, rbd_id, rbd_name):
        LOG.info("Try to migrate rbd %s" % rbd_name)
        move_rbd = {'target_pool': des_pool_id}
        resp = self.rbd_client.migrate(src_pool_id, rbd_id, **move_rbd)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
        status = self._wait_for_rbd_in_status(des_pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to migrate rbd %s!' % rbd_name)
        LOG.info('Migrated rbd %s successfully!' % rbd_name)

    def _get_servers_id(self):
        query_server = {'marker': 0, 'pagesize': 100}
        servers = self.servers_client.query(**query_server)
        if not len(servers) > 0:
            raise exceptions.TestFail("No available server found!")
        for server in servers:
            if server['group']['id'] == str(self.host_group_id):
                self.host_group_servers_id.append(server['id'])
            elif server['group']['id'] == str(self.rack_group_id):
                self.rack_group_servers_id.append(server['id'])
        LOG.info('Host group servers: %s' % self.host_group_servers_id)
        LOG.info('Rack group servers: %s' % self.rack_group_servers_id)

    def _get_osd_capacity(self, server_id):
        resp = self.osd_client.get_osd_capacity(server_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Query osd capacity failed")
        return resp.get('capacityUsed')

    def _get_osd_capacity_within_group(self, group_tag):
        total_capacity_used = 0
        if group_tag == 'host_group_':
            for server_id in self.host_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        elif group_tag == 'rack_group_':
            for server_id in self.rack_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        return total_capacity_used

    def test(self):
        """
        1. Create host group with host level, and add 3 hosts to this group
        2. Create host group with rack level, and add 3 other hosts to this group
        3. Deploy cluster
        4. Create pool in host group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        5. Create pool in rack group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        6. check osd capacity is changed only in the osd within r/w group
        7. Rbd migration: migrate rbd from pool 1 to pool 2,
        and execute FIO r/w, check r/w works ok
        8. Remove one host from one group, then r/w data in the other group
        and check that data r/w in the other group still works
        """
        # Step 1: Create host group with host level, and add 3 hosts
        self.host_group_id = self._create_group(self.host_group_name, 'host')
        host_bucket_id = self._create_bucket(self.host_group_id)
        self.params['host_group_parent_bucket_1'] = host_bucket_id
        self.params['host_group_parent_bucket_2'] = host_bucket_id
        self.params['host_group_parent_bucket_3'] = host_bucket_id
        self._add_three_hosts("host_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.host_group_name)

        # Step 2: Create host group with rack level, and add 3 hosts
        self.rack_group_id = self._create_group(self.rack_group_name, 'rack')
        rack_bucket_id_1 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_2 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_3 = self._create_bucket(self.rack_group_id)
        self.params['rack_group_parent_bucket_1'] = rack_bucket_id_1
        self.params['rack_group_parent_bucket_2'] = rack_bucket_id_2
        self.params['rack_group_parent_bucket_3'] = rack_bucket_id_3
        self._add_three_hosts("rack_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.rack_group_name)

        # Step 3: deploy cluster
        self._deploy_cluster()
        self._get_servers_id()

        # Step 4:create pool in host group, rbd, do FIO r/w, check r/w works ok
        self._copy_fio_package_to_host()
        self.host_group_pool_id, self.host_group_pool_name = \
            self._create_pool(self.host_group_id)
        self.host_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.host_group_rbd_id = self._create_rbd(self.host_group_pool_id,
                                                  self.host_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.host_group_rbd_name, self.host_group_pool_id))
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=True)

        # Step 5:create pool in rack group, rbd, do FIO r/w, check r/w works ok
        self.rack_group_pool_id, self.rack_group_pool_name = \
            self._create_pool(self.rack_group_id)
        self.rack_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.rack_group_rbd_id = self._create_rbd(self.rack_group_pool_id,
                                                  self.rack_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.rack_group_rbd_id, self.rack_group_pool_id))
        capacity_used_before = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("The previous used capacity is %s" % capacity_used_before)
        self._write_rbd(self.rack_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 6:check osd capacity is changed
        # only in the osd within r/w group
        capacity_used_after = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("Later used capacity is %s" % capacity_used_after)
        if capacity_used_after < capacity_used_before * 0.95:
            raise exceptions.TestFail(
                "Writing in the rack group changed the used capacity "
                "of the host group!")

        # Step 7:Rbd migration: migrate rbd from pool 1 to pool 2
        self._migrate_rbd(self.rack_group_pool_id, self.host_group_pool_id,
                          self.rack_group_rbd_id, self.rack_group_rbd_name)
        self._write_rbd(self.host_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 8:Down one host from one group,
        # and then w/r data in other group
        test_utils.delete_osd(self.rack_group_servers_id[0], self.params)
        self.servers_client.delete_server(self.rack_group_servers_id[0])
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=False)

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        if self.fio_working_path is not None:
            # delete files
            cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath,
                                                  self.fio_version))
            cmd1 = 'pkill fio || true; '
            cmd2 = 'rm -rf %s %s' % (
                os.path.join(self.dstpath, self.fio_version),
                os.path.join(self.dstpath, self.fio_working_path))
            cmd = cmd1 + cmd2
            remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd=cmd,
                cmd_mid=cmd_mid)
        if self.host_group_pool_id and self.host_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.host_group_rbd_id)
        if self.host_group_pool_id and self.rack_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.rack_group_rbd_id)
        if self.host_group_pool_id:
            self.pool_client.delete_pool(self.host_group_pool_id)
        if self.rack_group_pool_id:
            self.pool_client.delete_pool(self.rack_group_pool_id)
Exemplo n.º 20
0
def get_available_rbd(pool_id, params):
    rbd_client = RbdClient(params)
    resp = rbd_client.query(pool_id)
    if not len(resp) > 0:
        return create_rbd(pool_id, params)
    return resp[0]['id']
Exemplo n.º 21
0
class TestPool(test.Test):
    """
    Scenario for testing tool related operations.
    """
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)

        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None

    def setup(self):
        if 'cluster' in self.env:
            self.cluster_id = self.env['cluster']
        elif self.params.get('cluster_id'):
            self.cluster_id = self.params.get('cluster_id')

        ceph_server_ip = self.params.get('ceph_management_url')
        self.mid_host_ip = ceph_server_ip.split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')

        self.ioengine = self.params.get('ioengine', 'rbd')
        self.clientname = self.params.get('clientname', 'admin')
        self.rw = self.params.get('rw', 'write')
        self.bs = self.params.get('bs', '4k')
        self.iodepth = self.params.get('iodepth', 1024)
        self.numjobs = self.params.get('numjobs', 2)
        self.direct = self.params.get('direct', 1)
        self.size = self.params.get('size', '1M')

        self.end_host_ip = test_utils.get_available_host_ip(self.params)

    def _query_pool(self, pool_id, group_id, size, pg_num):
        # Test query pools in a specified cluster
        resp = self.pool_client.query()
        LOG.info("Got all pool %s" % resp)
        if not len(resp) > 0:
            raise exceptions.TestFail("Query pools failed")
        for i in range(len(resp)):
            if resp[i]['id'] == pool_id:
                if resp[i]['group_id'] != group_id:
                    raise exceptions.TestFail("Group id is not expected for "
                                              "pool%s" % pool_id)
                elif resp[i]['pg_num'] != pg_num:
                    raise exceptions.TestFail("Pg_num is not expected for "
                                              "pool%s" % pool_id)
                else:
                    return resp[i]['name']

    def _update_pool(self, pool_id, size, group_id, pg_num):
        """
        Execute the test of updating a pool
        """
        # Note: after the update a long sleep (see below) gives the pool time
        # to become ready again, otherwise later operations may fail with
        # "the pool is not ready".
        pool_name = 'cloudtest_' + utils.utils_misc.generate_random_string(6)
        vgroup_id = test_utils.get_available_vgroup(self.params)

        if self.params.get('NO_EC', "true") == "true":
            update_pool = {
                'name': pool_name,
                'size': size,
                'group_id': group_id,
                'pg_num': pg_num,
                'vgroup_id': vgroup_id
            }
        else:
            update_pool = {
                'name': pool_name,
                'group_id': self.params.get('rest_arg_group_id', 1),
                'pg_num': self.params.get('rest_arg_pg_num', 80),
                'vgroup_id': vgroup_id,
                'safe_type': self.params.get('safe_type', 10),
                'data_block_num': self.params.get('data_block_num', 3),
                'code_block_num': self.params.get('code_block_num', 0),
                'min_size': self.params.get('min_size', 1),
                'max_bytes': self.params.get("max_bytes", 1073741824)
            }
        resp = self.pool_client.update(pool_id, **update_pool)
        LOG.info('Rest Response: %s' % resp)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Update pool failed: %s" % self.body)

        time.sleep(240)

    def _check_specified_rbd(self, pool_id, rbd_id):
        # Test query a specified rdb in a pool
        resp = self.rbd_client.query(pool_id, rbd_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("No specified rbd found in the pool")
        return resp['name']

    def _write_rbd(self, pool_name, rbd_name, flag=False):
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=%s -clientname=%s ' % (self.ioengine,
                                                       self.clientname)
        cmd3 = '-pool=%s -rw=%s -bs=%s -iodepth=%s -numjobs=%s -direct=%s ' % \
               (pool_name, self.rw, self.bs, self.iodepth,
                self.numjobs, self.direct)
        cmd4 = '-size=%s -group_reporting -rbdname=%s -name=mytest' % \
               (self.size, rbd_name)
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd

        LOG.info("===cmd is %s" % cmd)
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def _check_rbd_write(self, pool_id, rbd_name, start, offset):
        status = self._wait_for_write_rbd(pool_id, rbd_name, start, offset)
        if not status:
            raise exceptions.TestFail('Failed to write rbd %s' % rbd_name)
        LOG.info('Wrote rbd %s successfully!' % rbd_name)

    def _wait_for_write_rbd(self,
                            pool_id,
                            rbd_name,
                            start,
                            offset,
                            timeout=300):
        def is_rbd_create():
            resp = self.rbd_client.query(pool_id)
            LOG.info("Check used size %s" % resp)
            for i in range(len(resp)):
                if resp[i]['name'] == rbd_name \
                        and (resp[i]['usedsize'] == 0 or
                             resp[i]['usedsize'] == offset):
                    LOG.info("usedsize is %s" % resp[i]['usedsize'])
                    LOG.info("start is %s" % start)
                    LOG.info("offset is %s" % offset)
                    return True
            return False

        return utils.utils_misc.wait_for(is_rbd_create,
                                         timeout,
                                         first=0,
                                         step=5,
                                         text='Waiting for rbd %s write.' %
                                         rbd_name)

    def test_edit_pool(self):
        group_id = 1
        # Creating 1M rbd
        RBD_CAPACITY = 1024 * 1024

        self.pool_response = test_utils.create_pool(self.params, flag=True)
        self.pool_name = self.pool_response.get('name')
        self.pool_id = self.pool_response.get('id')
        self.rbd_response = test_utils.create_rbd_with_capacity(
            self.pool_id, self.params, RBD_CAPACITY)
        self.rbd_id = self.rbd_response.get('id')
        self.rbd_name = self.rbd_response.get('name')

        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)
        self._write_rbd(self.pool_name, self.rbd_name, flag=True)
        self._check_rbd_write(self.pool_id, self.rbd_name, 0, 0)

        # Update the size and pg_num to the pool
        replicate = 2
        pg_num = 80
        self._update_pool(self.pool_id, replicate, group_id, pg_num)
        self.pool_name = \
            self._query_pool(self.pool_id, group_id, replicate, pg_num)

        self._write_rbd(self.pool_name, self.rbd_name, flag=True)
        self._check_rbd_write(self.pool_id, self.rbd_name, 0, 1024 * 1024)

        # Update the group to the pool
        group_id = 1
        self._update_pool(self.pool_id, replicate, group_id, pg_num)
        self.pool_name = \
            self._query_pool(self.pool_id, group_id, replicate, pg_num)

        self._write_rbd(self.pool_name, self.rbd_name, flag=True)
        self._check_rbd_write(self.pool_id, self.rbd_name, 0, 1024 * 1024)

    def teardown(self):
        if self.fio_working_path is not None:
            # delete files
            cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath,
                                                  self.fio_version))
            cmd1 = 'pkill fio || true; '
            cmd2 = 'rm -rf %s %s' % (
                os.path.join(self.dstpath, self.fio_version),
                os.path.join(self.dstpath, self.fio_working_path))
            cmd = cmd1 + cmd2
            remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd=cmd,
                cmd_mid=cmd_mid)
        time.sleep(240)
        if self.rbd_id is not None:
            try:
                test_utils.delete_rbd(self.pool_id, self.rbd_id, self.params)
            except exceptions.UnexpectedResponseCode:
                pass