def __init__(self, params, env):
    """Store run parameters, the shared environment and a clusters client.

    :param params: dict-like test parameters
    :param env: shared test environment object
    """
    self.params = params
    self.env = env
    # REST client for the clusters endpoint
    self.clusters_client = ClustersClient(self.params)
    # request bodies populated later from the parameters
    self.body = {}
    self.create_servers_body = {}
    # resource identifiers, filled in once each resource is created
    for attr in ('cluster_id', 'pool_id', 'target_id', 'rbd_id'):
        setattr(self, attr, None)
# Beispiel #2
# 0
def create_cluster(params):
    """
    Prepare env for testing, especially creating cluster, etc

    :param params: the dict-like parameter
    :return: the ID of the newly created cluster
    """

    clusters_client = ClustersClient(params)
    # randomized name keeps reruns from colliding with leftover clusters
    cluster_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    # lazy %-style args: the message is only rendered if the record is emitted
    LOG.info("Try to create cluster %s", cluster_name)
    create_cluster = {'name': cluster_name, 'addr': params.get('cluster_addr')}
    resp = clusters_client.create(**create_cluster)
    LOG.info(resp)
    return resp.body['id']
    def __init__(self, params, env):
        """Set up REST clients, timing settings and fio workload settings.

        :param params: dict-like test parameters
        :param env: shared test environment object
        """
        self.params = params
        self.env = env
        # one client per API family used by this test
        self.clusterconf_client = ClustersConfClient(params)
        self.zabbix_client = ZabbixClient(params)
        self.cluster_client = ClustersClient(params)
        self.server_client = ServersClient(params)
        self.body = {}
        self.pool_id = None
        self.pool_name = None
        self.recover_item_id = None
        # polling configuration with defaults
        self.statistical_time = self.params.get('statistical_time', 1800)
        self.interval_time = self.params.get('interval_time', 30)
        self.rbds_id = []
        # fio workload deployment locations
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        # directory name of the unpacked fio tarball (drop '.tar.gz')
        self.fio_working_path = self.fio_version[:-len('.tar.gz')]
        # management node address is the host part of the ceph URL
        management_url = self.params.get('ceph_management_url')
        self.mid_host_ip = management_url.split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')
        self.end_host_ip = test_utils.get_available_host_ip(self.params)

        self.server_name = None
        self.server_id = None
# Beispiel #4
# 0
 def __init__(self, params, env):
     """Store run parameters and reset per-test state.

     :param params: dict-like test parameters
     :param env: shared test environment object
     """
     self.params = params
     self.env = env
     self.clusters_client = ClustersClient(self.params)
     # request body and bookkeeping of resources created by the test
     self.body = {}
     self.created_resources = {}
     # identifiers filled in once each resource exists
     self.cluster_id = None
     self.lun_id = None
     self.gateway_id = None
# Beispiel #5
# 0
 def __init__(self, params, env):
     """Prepare clients, host/rack group names and fio settings.

     :param params: dict-like test parameters
     :param env: shared test environment object
     """
     self.params = params
     self.env = env
     self.clusters_client = ClustersClient(params)
     self.body = {}
     self.cluster_id = None
     # randomized group names keep reruns from colliding with leftovers
     self.host_group_name = ('host_group_'
                             + utils_misc.generate_random_string(6))
     self.rack_group_name = ('rack_group_'
                             + utils_misc.generate_random_string(6))
     # per-group bookkeeping: ids/names start unset, server lists empty
     for prefix in ('host_group', 'rack_group'):
         setattr(self, prefix + '_id', None)
         setattr(self, prefix + '_pool_id', None)
         setattr(self, prefix + '_pool_name', None)
         setattr(self, prefix + '_rbd_id', None)
         setattr(self, prefix + '_rbd_name', None)
         setattr(self, prefix + '_servers_id', [])
     # fio workload deployment locations
     self.dstpath = '/root'
     self.workload_path = data_dir.CEPH_API_SCENARIOS_TEST_DIR
     self.fio_version = self.params.get('fio_version')
     # directory name of the unpacked fio tarball (drop '.tar.gz')
     self.fio_working_path = self.fio_version[:-len('.tar.gz')]
     # management node address is the host part of the ceph URL
     management_url = self.params.get('ceph_management_url')
     self.mid_host_ip = management_url.split(':')[1].strip('/')
     self.mid_host_user = self.params.get('ceph_server_ssh_username')
     self.mid_host_password = self.params.get('ceph_server_ssh_password')
     self.end_host_user = self.params.get('ceph_node_ssh_username')
     self.end_host_password = self.params.get('ceph_node_ssh_password')
     # fio job parameters with defaults
     self.rw = self.params.get('rw', 'randrw')
     self.bs = self.params.get('bs', '8k')
     self.iodepth = self.params.get('iodepth', 128)
     self.runtime = self.params.get('runtime', 120)
     self.rwmixread = self.params.get('rwmixread', 70)
     self.end_host_ip = None
# Beispiel #6
# 0
 def __init__(self, params, env):
     """Create REST clients and fio workload settings for the test.

     :param params: dict-like test parameters
     :param env: shared test environment object
     """
     self.params = params
     self.env = env
     # one client per API family exercised by this test
     self.cluster_client = ClustersClient(params)
     self.server_client = ServersClient(params)
     self.monitor_client = MonitorsClient(params)
     self.pool_client = PoolsClient(params)
     self.osd_client = OsdClient(params)
     # fio workload deployment locations
     self.dstpath = '/root'
     self.workload_path = data_dir.COMMON_TEST_DIR
     self.fio_version = self.params.get('fio_version')
     # directory name of the unpacked fio tarball (drop '.tar.gz')
     self.fio_working_path = self.fio_version[:-len('.tar.gz')]
# Beispiel #7
# 0
    def __init__(self, params, env):
        """Initialize clients and migration bookkeeping for the test.

        :param params: dict-like test parameters
        :param env: shared test environment object
        """
        self.params = params
        self.env = env
        self.cluster_client = ClustersClient(params)
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)
        self.server_client = ServersClient(params)
        # pool/rbd identity before and after the operation under test
        for when in ('before', 'after'):
            setattr(self, 'pool_id_%s' % when, None)
            setattr(self, 'pool_name_%s' % when, None)
            setattr(self, 'rbd_name_%s' % when, None)
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('******************%s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None

        self.target_pool = None
        self.rbd_id = None
        self.server_name = None
        self.server_id = None
 def __init__(self, params, env):
     """Keep the parameters, the clusters client and cleanup state.

     :param params: dict-like test parameters
     :param env: shared test environment object
     """
     self.params = params
     self.env = env
     self.client = ClustersClient(params)
     self.body = {}
     # cluster IDs queued for deletion during teardown
     self.resource_to_delete = []
class TestClusters(test.Test):
    """
    Clusters related tests.
    """
    def __init__(self, params, env):
        """Initialize the test.

        :param params: dict-like test parameters
        :param env: shared test environment object
        """
        self.params = params
        self.client = ClustersClient(params)
        self.body = {}
        self.env = env
        self.resource_to_delete = []

    def setup(self):
        """
        Set up before executing test: collect every 'rest_arg_*' parameter
        into the request body under its stripped key.
        """
        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v

    def test_create(self):
        """
        Execute the test of creating a cluster
        """
        try:
            resp = self.client.create(**self.body)
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed; anything unexpected is still re-raised below
        except Exception:
            # .get avoids masking the original error with a KeyError when
            # the body carries no 'name'
            if len(self.body.get('name', '')) > 32:
                LOG.warning('Known bug, cluster name should not exceed 32 chars')
                return
            else:
                raise
        # NOTE(review): 'not resp and verify_response(...)' mirrors the
        # pattern used elsewhere in this module, though 'or' may have been
        # intended — confirm against the client API.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Create cluster failed: %s" % self.body)
        cluster_id = resp.body.get('id')
        # equality, not substring membership: the old "in 'yes'" also
        # matched values like 'e', 's' or ''
        if self.params.get('resource_need_delete', 'no') == 'yes':
            self.resource_to_delete.append(cluster_id)

    def test_query(self):
        """Query all clusters, a single cluster or a cluster summary,
        depending on the 'query_type' parameter."""
        query_type = self.params.get('query_type')

        # exact comparisons: the old substring form "query_type in 'all'"
        # matched '' and raised TypeError for a missing (None) query_type
        if query_type == 'all':
            # Test query all clusters
            resp = self.client.query()
            if not len(resp) > 0:
                raise exceptions.TestFail("No clusters found, "
                                          "query all clusters failed")

        elif query_type == 'single':
            # Test query single cluster
            cluster_id = self.env.get('cluster', '1')
            resp = self.client.query(cluster_id)
            if not len(resp) > 0:
                raise exceptions.TestFail("Query cluster failed: %s" %
                                          cluster_id)
        elif query_type == 'single_summary':
            cluster_id = self.env.get('cluster', '1')
            resp = self.client.query(cluster_id, extra_url='summary')
            LOG.info("Got summary of cluster '%s': %s" % (cluster_id, resp))

    def test_cluster_operation(self):
        """Run one lifecycle operation (start/restart/deploy/expand/stop/
        upgrade) against the cluster recorded in the environment."""
        cluster_ops = self.params.get('cluster_operation')
        cluster_id = int(self.env.get('cluster', 1))
        LOG.info("Try to %s cluster '%s'" % (cluster_ops, cluster_id))
        # Exact comparison in one if/elif chain. The original substring
        # checks ("cluster_ops in 'start'" followed by a plain
        # "if cluster_ops in 'restart'") made the 'start' op also execute
        # the restart branch, because 'start' is a substring of 'restart'.
        if cluster_ops == 'start':
            resp = self.client.start_cluster(cluster_id)
            status = test_utils.wait_for_cluster_in_status(
                cluster_id, self.client, 'deployed')
            if not status:
                raise exceptions.TestFail("Failed to start cluster %d" %
                                          cluster_id)

        elif cluster_ops == 'restart':
            resp = self.client.restart_cluster(cluster_id)
            status = test_utils.wait_for_cluster_in_status(
                cluster_id, self.client, 'deployed')
            if not status:
                raise exceptions.TestFail("Failed to restart cluster %d" %
                                          cluster_id)

        elif cluster_ops == 'deploy':
            resp = self.client.deploy_cluster(cluster_id)
            status = test_utils.wait_for_cluster_in_status(
                cluster_id, self.client, 'deployed')
            if not status:
                raise exceptions.TestFail("Failed to deploy cluster %d" %
                                          cluster_id)

        elif cluster_ops == 'expand':
            # compare raw capacity before and after the expansion
            resp = self.client.query(cluster_id, extra_url='summary')
            raw_total_before = resp.body.get('rawTotal')
            resp = self.client.expand_cluster(cluster_id)
            resp = self.client.query(cluster_id, extra_url='summary')
            raw_total_after = resp.body.get('rawTotal')
            if int(raw_total_before) < int(raw_total_after):
                LOG.info("Successfully expanded cluster %d from %d to %d" %
                         (cluster_id, raw_total_before, raw_total_after))
            else:
                msg = ("Failed to expand cluster %d, before: %d; after: %d" %
                       (cluster_id, raw_total_before, raw_total_after))
                LOG.error(msg)

        elif cluster_ops == 'stop':
            resp = self.client.stop_cluster(cluster_id)
            status = test_utils.wait_for_cluster_in_status(
                cluster_id, self.client, 'stopped')
            if not status:
                raise exceptions.TestFail("Failed to stop cluster %d" %
                                          cluster_id)
        elif cluster_ops == 'upgrade':
            resp = self.client.upgrade_cluster(cluster_id)
            status = test_utils.wait_for_cluster_in_status(
                cluster_id, self.client, 'upgrade')
            if not status:
                raise exceptions.TestFail("Failed to upgrade cluster %d" %
                                          cluster_id)

    def test_delete(self):
        """
        Test that deletion of specified cluster
        """
        resp = self.client.create(**self.body)
        # read the id from resp.body for consistency with test_create and
        # the other call sites in this module
        cluster_id = resp.body.get('id')
        LOG.info("Try to delete cluster with ID: %d" % cluster_id)
        self.client.delete_cluster(cluster_id=cluster_id)

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        if self.resource_to_delete:
            for resource_id in self.resource_to_delete:
                self.client.delete_cluster(resource_id)
class TestRBDRemoteBackup(test.Test):
    """
    Module for testing RBD remote backup scenarios
    """
    def __init__(self, params, env):
        """Initialize the test.

        :param params: dict-like test parameters
        :param env: shared test environment object
        """
        self.params = params
        self.env = env
        self.body = {}
        self.create_servers_body = {}
        self.cluster_id = None
        self.pool_id = None
        self.target_id = None
        self.rbd_id = None
        self.clusters_client = ClustersClient(self.params)

    def setup(self):
        """
        Set up before executing test
        1. to check if two clusters are available
        2. create one pool: testpool
        3. configure remote backup in the testpool
        """
        # to check if two cluster are available
        clusters = test_utils.get_available_clusters(self.params)
        if len(clusters) < 1:
            raise exceptions.TestSetupFail(
                'There are not enough clusters!')
        elif len(clusters) < 2:
            # only one cluster present: create and deploy a second one so
            # remote backup has a destination
            LOG.info('There are not enough clusters, try to create cluster!')
            self.cluster_id = self._create_cluster()
            self.params['cluster_id'] = self.cluster_id
            self.servers_client = ServersClient(self.params)
            for k, v in self.params.items():
                if 'rest_arg_cluster2_' in k:
                    new_key = k.split('rest_arg_cluster2_')[1]
                    self.create_servers_body[new_key] = v
            self._add_three_hosts()
            self._deploy_cluster()
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) < 2:
                raise exceptions.TestSetupFail(
                    'There are not enough clusters!')

        # second cluster is the backup source; any other is the destination
        self.cluster_id = clusters[1]['id']
        self.params['cluster_id'] = self.cluster_id
        for cluster in clusters:
            if cluster['id'] != self.cluster_id:
                self.des_cluster_id = cluster['id']
                self.body['des_cluster_id'] = self.des_cluster_id
                break
        src_host = test_utils.get_available_server_info(self.params,
                                                        self.cluster_id)
        self.src_ip = src_host['publicip']
        self.body['src_ip'] = self.src_ip
        self.src_host_id = src_host['id']
        self.body['src_host_id'] = self.src_host_id
        des_host = test_utils.get_available_server_info(self.params,
                                                        self.des_cluster_id)
        self.des_ip = des_host['publicip']
        self.body['des_ip'] = self.des_ip
        self.des_host_id = des_host['id']
        self.body['des_host_id'] = self.des_host_id

        # reuse a pre-configured pool if given, otherwise create one and
        # wait until it reaches 'ready'
        if self.params.get('pool_id'):
            self.pool_id = self.params.get('pool_id')
        else:
            self.pool_id = test_utils.create_pool(self.params)
            pool_client = PoolsClient(self.params)
            if not test_utils.wait_for_pool_in_state(self.pool_id, pool_client,
                                                     'ready'):
                raise exceptions.TestSetupFail("Failed to creating test pool!")
        self.params['pool_id'] = self.pool_id

        # configure remote backup in testpool
        LOG.info("Try to configure remote backup in pool %s : %s"
                 % (self.pool_id, self.body))
        self.client = RemoteBackupClient(self.params)
        self.client.configure_rbpolicy(**self.body)

        # other pre-conditions: SSH credentials for the control server and
        # the iSCSI initiator node
        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password',
                                                'lenovo')
        self.initiator_ip = self.params.get('initiator_ip', self.src_ip)
        self.initiator_username = self.params.get('ceph_node_ssh_username',
                                                  'root')
        self.initiator_password = self.params.get('ceph_node_ssh_password',
                                                  'lenovo')
        # create iscsi client
        self.iscsi_client = ISCSIClient(self.params)

    def _create_cluster(self):
        """Create the second cluster and return its ID."""
        create_cluster = {'name': self.params.get('cluster_name',
                                                  'cloudtest_cluster_2'),
                          'addr': self.params.get('cluster_addr', 'vm')}
        resp = self.clusters_client.create(**create_cluster)
        # NOTE(review): 'not resp and verify_response(...)' mirrors the
        # module-wide pattern, though 'or' may have been intended — confirm.
        if not resp and utils.verify_response(create_cluster, resp):
            raise exceptions.TestSetupFail(
                "Create cluster failed: %s" % create_cluster)
        return resp.body.get('id')

    def _create_server(self, request_body):
        """Add one server and wait until it reaches the 'added' state.

        :param request_body: kwargs for the servers client create call
        :raises exceptions.TestFail: if the server never reaches 'added'
        """
        if not request_body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 600)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s"
                                      % request_body['servername'])
        LOG.info('Create server %s successfully!'
                 % body['properties'].get('name'))

    def _add_three_hosts(self):
        """Add the servers described by 'servername_N' parameters, one
        thread per server, and wait for all of them to finish."""
        parent_bucket = self.create_servers_body.get('parent_bucket')
        i = 1
        threads = []
        while self.create_servers_body.get('servername_%d' % i):
            servername = self.create_servers_body.get(
                'servername_%d' % i, 'cloudtest_server_%d' % i)
            username = self.create_servers_body.get('username_%d' % i, 'root')
            password = self.create_servers_body.get('password_%d' % i,
                                                    'lenovo')
            publicip = self.create_servers_body.get('publicip_%d' % i)
            clusterip = self.create_servers_body.get('clusterip_%d' % i)
            create_server_body = {'servername': servername,
                                  'username': username,
                                  'passwd': password,
                                  'publicip': publicip,
                                  'clusterip': clusterip,
                                  'parent_bucket': parent_bucket}
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            i = i + 1

        # waiting for all servers ready
        for t in threads:
            # daemon threads so a hung server add cannot block interpreter exit
            t.daemon = True
            t.start()

        for i, t in enumerate(threads):
            try:
                t.join(600)
            except Exception as details:
                LOG.exception('Caught exception waiting for server %d added : %s'
                              % (i, details))

    def _deploy_cluster(self):
        """Deploy the created cluster and wait for the 'deployed' state."""
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(
            self.cluster_id, self.clusters_client, 'deployed',
            int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _start_rbtask(self, rbd_id):
        """Start a remote-backup task for *rbd_id* and return the task ID."""
        rbtask_body = {'rbd_id': rbd_id}
        resp_body = self.client.start_rbtask(**rbtask_body)
        body = resp_body.body
        LOG.info("Create remote backup %s for rbd %s"
                 % (body.get('id'), rbd_id))
        # give the backend time to register the task before it is queried
        time.sleep(30)
        return body.get('id')

    def _create_iscsi_target(self):
        """Create an iSCSI target for the initiator and return its ID."""
        self.iscsi_target_name = "cloudtest" + \
                                 utils.utils_misc.generate_random_string(6)
        create_body = {'initiator_ips': self.initiator_ip,
                       'target_name': self.iscsi_target_name,
                       'multipath': self.params.get('multipath', 3)}

        resp = self.iscsi_client.create(**create_body)
        if not resp and utils.verify_response(create_body, resp):
            raise exceptions.TestFail("Create target failed: %s "
                                      % create_body)
        return resp.body['target_id']

    def _create_iscsi_lun(self, target_id, rbd_id):
        """Expose *rbd_id* through *target_id* and return the new LUN ID."""
        create_body = {'target_id': target_id,
                       'pool_id': self.pool_id,
                       'rbd_id': rbd_id}
        resp = self.iscsi_client.add_lun(**create_body)
        return resp.body['lun_id']

    def _delete_iscsi_lun(self, target_id, lun_id):
        """Detach *lun_id* from *target_id*."""
        body = {
            'target_id': target_id,
            'lun_id': lun_id}

        self.iscsi_client.delete_lun(**body)

    def _delete_target(self, target_id):
        """
        Test that deletion of delete target
        """
        self.iscsi_client.delete_iscsitarget(target_id)
        # the target must no longer appear in a fresh query
        for entry in self.iscsi_client.query():
            if entry['target_id'] == target_id:
                raise exceptions.TestFail("Delete target failed")

    def _create_and_bind_ISCSI_to_rbd(self, rbd_id):
        """Create an iSCSI target and bind *rbd_id* to it as a LUN."""
        self.target_id = self._create_iscsi_target()
        self.lun_id = self._create_iscsi_lun(self.target_id, rbd_id)

    def _start_restore(self, rbd_id, timestamp):
        """Start restoring *rbd_id* to *timestamp*; return the task ID."""
        restore_body = {'snap_time': timestamp}
        resp_body = self.client.start_restore(rbd_id, **restore_body)
        LOG.info("Try to recover to remote backup %s!" % timestamp)
        # give the backend time to register the restore task
        time.sleep(30)
        body = resp_body.body
        return body.get('id')

    def _verify_task_successfully(self, rbtask_id, state):
        """Wait for task *rbtask_id* to reach *state*.

        :return: the timestamp recorded for the task
        :raises exceptions.TestFail: if the task is unknown or times out
        """
        extra_url = '/list_rbtasks?count=1024&begin_index=0'
        rbtasks = self.client.list_rbtasks(extra_url)
        rb_record = None
        for rbtask in rbtasks:
            if rbtask['id'] == rbtask_id:
                rb_record = rbtask['properties']['timestamp']
                break
        if rb_record:
            status = test_utils.wait_for_remote_backup_or_restore_complete(
                rbtask_id, self.client, state, 60)
            if status:
                LOG.info("%s successfully, the timestamp is %s"
                         % (state, rb_record))
                return rb_record

        raise exceptions.TestFail("Failed to %s!" % state)

    @staticmethod
    def _verify_file_exist(file_name, mount_point, actual, expected):
        """Fail the test when the file presence (*actual*) differs from
        *expected*."""
        if actual:
            LOG.info("Find %s under %s!" % (file_name, mount_point))
            if actual != expected:
                raise exceptions.TestFail("Expected not find the file %s."
                                          % file_name)
        else:
            LOG.info("%s not found under %s" % (file_name, mount_point))
            if actual != expected:
                raise exceptions.TestFail("Expected can find the file %s."
                                          % file_name)

    def _check_file_via_iscsi(self, mount_point, file_name, need_mk,
                              create_data, expected):
        """Mount the rbd via iSCSI, optionally mkfs/create data, then verify
        whether *file_name* is present as *expected*.

        Consolidates the four identical operate+verify blocks that were
        previously inlined in test_rbd_remote_backup.
        """
        find = test_utils.operate_iscsi(self.control_server_ip,
                                        self.control_username,
                                        self.control_password,
                                        self.initiator_ip,
                                        self.initiator_username,
                                        self.initiator_password,
                                        self.iscsi_target_name,
                                        self.initiator_ip, mount_point,
                                        file_name, need_mk, create_data)
        self._verify_file_exist(file_name, mount_point, find, expected)

    def test_rbd_remote_backup(self):
        """
        This test basically performs following steps:
            1. create rbd in testpool
            2. format disk
            3. create remote backup for this rbd(e.g.record1)
            4. write data to this rbd via ISCSI, include 1->11 steps
            5. create remote backup for this rbd(e.g.record2)
            6. recover rbd from record1
            7. repeat step3: sub-step 2)3)4)5)7)
            8. check testfile.txt does not exist
            9. do recover rbd from record2
            10. check testfile.txt exists
        """
        mount_point = self.params.get('mount_point', '/mnt')
        file_name = self.params.get('file_name', 'testfile.txt')
        # step1 create rbd in testpool
        self.rbd_id = test_utils.create_rbd(self.pool_id, self.params)
        LOG.info("Create rbd %s in pool %s" % (self.rbd_id, self.pool_id))
        # step2 format disk; the file must not exist on a fresh filesystem
        self._create_and_bind_ISCSI_to_rbd(self.rbd_id)
        time.sleep(60)
        self._check_file_via_iscsi(mount_point, file_name, need_mk=True,
                                   create_data=False, expected=False)
        self._delete_iscsi_lun(self.target_id, self.lun_id)
        time.sleep(60)
        # step3 create remote backup for this rbd
        rbtask_id_1 = self._start_rbtask(self.rbd_id)
        rb_record_1 = self._verify_task_successfully(rbtask_id_1, 'backed_up')
        # step4 write data to this rbd via ISCSI (sub-steps 2)-10))
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)
        time.sleep(60)
        self._check_file_via_iscsi(mount_point, file_name, need_mk=False,
                                   create_data=True, expected=True)
        # step4: sub-step 11)
        self._delete_iscsi_lun(self.target_id, self.lun_id)
        time.sleep(60)
        # step 5 create remote backup for this rbd
        rbtask_id_2 = self._start_rbtask(self.rbd_id)
        rb_record_2 = self._verify_task_successfully(rbtask_id_2, 'backed_up')
        # step 6 recover rbd from rb_record_1
        restore_id = self._start_restore(self.rbd_id, rb_record_1)
        self._verify_task_successfully(restore_id, 'restored')
        # step 7/8 remount and check testfile.txt does not exist
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)
        time.sleep(60)
        self._check_file_via_iscsi(mount_point, file_name, need_mk=False,
                                   create_data=False, expected=False)
        self._delete_iscsi_lun(self.target_id, self.lun_id)
        time.sleep(60)
        # step 9 do recover rbd from record2
        restore_id = self._start_restore(self.rbd_id, rb_record_2)
        self._verify_task_successfully(restore_id, 'restored')
        # step 10 verify testfile.txt exists
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)
        time.sleep(60)
        self._check_file_via_iscsi(mount_point, file_name, need_mk=False,
                                   create_data=False, expected=True)
        self._delete_iscsi_lun(self.target_id, self.lun_id)

    def teardown(self):
        """Clean up the iSCSI target and the test rbd (best effort)."""
        if self.target_id:
            self._delete_target(self.target_id)
        if self.rbd_id:
            try:
                test_utils.delete_rbd(self.pool_id, self.rbd_id, self.params)
            # Python-3-compatible form of the old 'except X, e:'; the
            # exception object was unused, and a non-2xx delete response is
            # deliberately ignored during best-effort cleanup.
            except exceptions.UnexpectedResponseCode:
                pass
# Beispiel #11
# 0
def delete_cluster(cluster_id, params):
    """Delete the cluster identified by *cluster_id*.

    :param cluster_id: ID of the cluster to remove
    :param params: dict-like parameters used to build the REST client
    :return: the client's response to the delete request
    """
    LOG.info("Try to delete cluster: %s" % cluster_id)
    client = ClustersClient(params)
    return client.delete_cluster(cluster_id)
# Beispiel #12
# 0
def get_available_clusters(params):
    """Return the clusters currently known to the management API.

    :param params: dict-like parameters used to build the REST client
    :return: the query result from the clusters client
    """
    return ClustersClient(params).query()
# Beispiel #13
# 0
class TestGroup(test.Test):
    """
    Test group can separate the data io from customized domain
    """
    def __init__(self, params, env):
        self.params = params
        self.clusters_client = ClustersClient(params)
        self.body = {}
        self.env = env
        self.cluster_id = None
        # Host-level group resources (group/pool/rbd/servers), filled in
        # as the test progresses.
        self.host_group_name = 'host_group_' \
                               + utils_misc.generate_random_string(6)
        self.host_group_id = None
        self.host_group_pool_id = None
        self.host_group_pool_name = None
        self.host_group_rbd_id = None
        self.host_group_rbd_name = None
        self.host_group_servers_id = []
        # Rack-level group resources, mirroring the host-level set above.
        self.rack_group_name = 'rack_group_' \
                               + utils_misc.generate_random_string(6)
        self.rack_group_id = None
        self.rack_group_pool_id = None
        self.rack_group_pool_name = None
        self.rack_group_rbd_id = None
        self.rack_group_rbd_name = None
        self.rack_group_servers_id = []
        # fio workload staging: the tarball travels local -> mid host
        # (management node) -> end host, where it is unpacked and run.
        self.dstpath = '/root'
        self.workload_path = data_dir.CEPH_API_SCENARIOS_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        # Tarball name minus the '.tar.gz' suffix, i.e. the unpacked dir.
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        self.mid_host_ip = \
            self.params.get('ceph_management_url').split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')
        # fio job parameters, overridable through test params.
        self.rw = self.params.get('rw', 'randrw')
        self.bs = self.params.get('bs', '8k')
        self.iodepth = self.params.get('iodepth', 128)
        self.runtime = self.params.get('runtime', 120)
        self.rwmixread = self.params.get('rwmixread', 70)
        self.end_host_ip = None

    def setup(self):
        """
        Set up before executing test
        """
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        # NOTE(review): 'not resp and verify_response(...)' can only fail
        # when resp is falsy; presumably 'or' was intended -- confirm with
        # the other call sites that repeat this pattern before changing it.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')
        LOG.info("Created cluster successfully!")
        # The remaining REST clients read the cluster id from params.
        self.params['cluster_id'] = self.cluster_id
        self.servers_client = ServersClient(self.params)
        self.group_client = GroupsClient(self.params)
        self.pool_client = PoolsClient(self.params)
        self.rbd_client = RbdClient(self.params)
        self.osd_client = OsdClient(self.params)

    def _copy_fio_package_to_host(self):
        """Copy the fio tarball to an end host, hopping via the mid host."""
        self.end_host_ip = test_utils.get_available_host_ip(self.params)
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

    def _write_rbd(self, pool_name, rbd_name, flag=False):
        """Run fio (rbd ioengine) against pool/rbd on the end host.

        :param pool_name: pool containing the rbd
        :param rbd_name: rbd image to exercise
        :param flag: when True, unpack the fio tarball before running
        """
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin -pool=%s ' % \
               pool_name
        cmd3 = '-rw=%s -rwmixread=%s -bs=%s -iodepth=%s -numjobs=1 -direct=1 ' % \
               (self.rw, self.rwmixread, self.bs, self.iodepth)
        cmd4 = '-runtime=%s -group_reporting -rbdname=%s -name=mytest' % \
               (self.runtime, rbd_name)
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        LOG.info("cmd = %s" % cmd)

        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def _create_group(self, name, leaf_firstn):
        """Create a group policy; return its id or fail the test."""
        group_body = {'name': name, 'max_size': 10, 'leaf_firstn': leaf_firstn}
        resp_body = self.group_client.create_group(**group_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create group policy failed")
        LOG.info("Created group '%s' with id: %s" % (body['name'], body['id']))
        return body['id']

    def _create_bucket(self, group_id):
        """Create a rack-type bucket under *group_id*; return its id."""
        create_body = {
            'name': 'cloudtest_bucket_' + utils_misc.generate_random_string(6),
            'type': 'rack'
        }
        resp_body = self.group_client.create_bucket(group_id, **create_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create bucket failed")
        LOG.info("Created bucket '%s' with id: %s" %
                 (body['name'], body['id']))
        return body['id']

    def _create_server(self, request_body):
        """Add one server and block until it reaches the 'added' state.

        A default parent bucket is looked up when the body lacks one.
        """
        if not request_body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 600)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s" %
                                      request_body['servername'])
        LOG.info('Create server %s successfully!' %
                 body['properties'].get('name'))

    def _add_three_hosts(self, kwargs):
        """Add every server described by params whose key contains *kwargs*.

        Strips the *kwargs* prefix from matching param keys, builds one
        creation body per 'servername_<i>' entry, and adds the servers
        concurrently (one thread each).
        """
        body = {}
        for k, v in self.params.items():
            if kwargs in k:
                new_key = k.split(kwargs)[1]
                body[new_key] = v
        LOG.info("body = %s" % body)
        i = 1
        threads = []
        while body.get('servername_%d' % i):
            tmp = 'servername_%d' % i
            servername = body.get(tmp, 'cloudtest_server_%d' % i)
            tmp = 'username_%d' % i
            username = body.get(tmp, 'root')
            tmp = 'password_%d' % i
            password = body.get(tmp, 'lenovo')
            tmp = 'publicip_%d' % i
            publicip = body.get(tmp)
            tmp = 'clusterip_%d' % i
            clusterip = body.get(tmp)
            tmp = 'parent_bucket_%d' % i
            parent_bucket = body.get(tmp)
            create_server_body = {
                'servername': servername,
                'username': username,
                'passwd': password,
                'publicip': publicip,
                'clusterip': clusterip,
                'parent_bucket': parent_bucket
            }
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            i = i + 1

        # waiting for all servers ready
        for t in threads:
            # Daemon threads so a hung server add cannot keep the process
            # alive past the join timeout below.
            t.setDaemon(True)
            t.start()

        for i in range(0, len(threads)):
            try:
                # join() with a timeout; a server that is still not added
                # after 600s is only logged, not failed here.
                threads[i].join(600)
            except Exception as details:
                LOG.exception(
                    'Caught exception waiting for server %d added : %s' %
                    (i, details))

    def _deploy_cluster(self):
        """Trigger deployment and wait until the cluster is 'deployed'."""
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(
            self.cluster_id, self.clusters_client, 'deployed',
            int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            # NOTE(review): '%d' assumes the cluster id from the REST body
            # is numeric -- confirm, or '%s' would be safer.
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _create_pool(self, group_id):
        """Create a pool bound to *group_id*; return (pool_id, pool_name)."""
        pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        LOG.info("Try to create pool %s" % pool_name)
        create_pool = {
            'name': pool_name,
            'size': self.params.get('pool_size', 3),
            'group_id': group_id,
            'pg_num': self.params.get('pg_num', 128)
        }
        resp = self.pool_client.create(**create_pool)
        status = self._wait_for_pool_create(pool_name)
        if not status:
            raise exceptions.TestFail('Failed to create pool %s' % pool_name)
        LOG.info('Create pool %s successfully !' % pool_name)
        pool_id = resp.body['properties']['context']['pool_id']
        return pool_id, pool_name

    def _wait_for_pool_create(self, pool_name, timeout=1000):
        """Poll until *pool_name* exists in ready state; return truthy on success."""
        def is_pool_create():
            resp = self.pool_client.query()
            # NOTE(review): size==3 / pg_num==128 hard-code the defaults
            # used in _create_pool; the wait never succeeds if 'pool_size'
            # or 'pg_num' params override them -- confirm intent.
            for i in range(len(resp)):
                if resp[i]['name'] == pool_name \
                        and resp[i]['state'] == 1 \
                        and resp[i]['size'] == 3 \
                        and resp[i]['pg_num'] == 128:
                    return True
            return False

        return utils_misc.wait_for(is_pool_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for pool %s create.' %
                                   pool_name)

    def _create_rbd(self, pool_id, rbd_name):
        """Create an rbd in *pool_id*, wait for 'ready', return its id."""
        LOG.info("Try to create rbd %s" % rbd_name)
        create_rbd = {
            'name': rbd_name,
            'object_size': self.params.get('object_size', 10),
            'capacity': self.params.get('capacity', 1024 * 1024 * 1024)
        }
        self.rbd_client.create(pool_id, **create_rbd)
        status = self._wait_for_rbd_in_status(pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to create rbd %s!' % rbd_name)
        # Re-query to discover the id assigned to the new rbd.
        resp = self.rbd_client.query(pool_id)
        for i in range(len(resp)):
            if resp[i]['name'] == rbd_name:
                return resp[i]['id']
        raise exceptions.TestError('Create rbd %s failed' % rbd_name)

    def _wait_for_rbd_in_status(self, pool_id, rbd_name, status, timeout=300):
        """Poll until *rbd_name* in *pool_id* reaches *status* ('copying'/'ready')."""
        # Maps symbolic status names to the numeric codes the API reports.
        status_map = {'copying': 6, 'ready': 0}

        def is_rbd_create():
            resp = self.rbd_client.query(pool_id)
            for i in range(len(resp)):
                if resp[i]['name'] == rbd_name:
                    if resp[i]['status'] == status_map[status]:
                        return True
            return False

        return utils_misc.wait_for(is_rbd_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for rbd %s create.' %
                                   rbd_name)

    def _migrate_rbd(self, src_pool_id, des_pool_id, rbd_id, rbd_name):
        """Migrate an rbd between pools and wait until it is ready again."""
        LOG.info("Try to migrate rbd %s" % rbd_name)
        move_rbd = {'target_pool': des_pool_id}
        resp = self.rbd_client.migrate(src_pool_id, rbd_id, **move_rbd)
        # NOTE(review): same 'not resp and ...' pattern as setup() -- see
        # the note there.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
        status = self._wait_for_rbd_in_status(des_pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to migrate rbd %s!' % rbd_name)
        LOG.info('Migrate rbd %s successfully !' % rbd_name)

    def _get_servers_id(self):
        """Partition known server ids into host-group and rack-group lists."""
        query_server = {'marker': 0, 'pagesize': 100}
        servers = self.servers_client.query(**query_server)
        if not len(servers) > 0:
            raise exceptions.TestFail("No available server found!")
        for server in servers:
            # Group ids come back as strings; compare against str(id).
            if server['group']['id'] == str(self.host_group_id):
                self.host_group_servers_id.append(server['id'])
            elif server['group']['id'] == str(self.rack_group_id):
                self.rack_group_servers_id.append(server['id'])
        LOG.info('Host group servers: %s' % self.host_group_servers_id)
        LOG.info('Rack group servers: %s' % self.rack_group_servers_id)

    def _get_osd_capacity(self, server_id):
        """Return the 'capacityUsed' reported for *server_id*'s osds."""
        resp = self.osd_client.get_osd_capacity(server_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Query osd capacity failed")
        return resp.get('capacityUsed')

    def _get_osd_capacity_within_group(self, group_tag):
        """Sum osd capacityUsed over the servers of the tagged group.

        :param group_tag: 'host_group_' or 'rack_group_'
        """
        total_capacity_used = 0
        # NOTE(review): 'group_tag in <literal>' is a substring test of the
        # literal (reversed from the usual direction); it works for the
        # exact tags passed in test() but would misroute e.g. ''.
        if group_tag in 'host_group_':
            for server_id in self.host_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        elif group_tag in 'rack_group_':
            for server_id in self.rack_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        return total_capacity_used

    def test(self):
        """
        1. Create host group with host level, and add 3 hosts to this group
        2. Create host group with rack level, and add 3 other hosts to this group
        3. Deploy cluster
        4. Create pool in host group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        5. Create pool in rack group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        6. check osd capacity is changed only in the osd within r/w group
        7. Rbd migration: migrate rbd from pool 1 to pool 2,
        and execute FIO r/w, check r/w works ok
        8. Down one host from one group, and then w/r data in other group
        check data r/w in other group works ok
        """
        # Step 1: Create host group with host level, and add 3 hosts
        self.host_group_id = self._create_group(self.host_group_name, 'host')
        host_bucket_id = self._create_bucket(self.host_group_id)
        # All three host-group servers share one bucket (host-level policy).
        self.params['host_group_parent_bucket_1'] = host_bucket_id
        self.params['host_group_parent_bucket_2'] = host_bucket_id
        self.params['host_group_parent_bucket_3'] = host_bucket_id
        self._add_three_hosts("host_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.host_group_name)

        # Step 2: Create host group with rack level, and add 3 hosts
        self.rack_group_id = self._create_group(self.rack_group_name, 'rack')
        # Rack-level policy: each server gets its own bucket.
        rack_bucket_id_1 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_2 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_3 = self._create_bucket(self.rack_group_id)
        self.params['rack_group_parent_bucket_1'] = rack_bucket_id_1
        self.params['rack_group_parent_bucket_2'] = rack_bucket_id_2
        self.params['rack_group_parent_bucket_3'] = rack_bucket_id_3
        self._add_three_hosts("rack_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.rack_group_name)

        # Step 3: deploy cluster
        self._deploy_cluster()
        self._get_servers_id()

        # Step 4:create pool in host group, rbd, do FIO r/w, check r/w works ok
        self._copy_fio_package_to_host()
        self.host_group_pool_id, self.host_group_pool_name = \
            self._create_pool(self.host_group_id)
        self.host_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.host_group_rbd_id = self._create_rbd(self.host_group_pool_id,
                                                  self.host_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.host_group_rbd_name, self.host_group_pool_id))
        # flag=True: first run also unpacks the fio tarball.
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=True)

        # Step 5:create pool in rack group, rbd, do FIO r/w, check r/w works ok
        self.rack_group_pool_id, self.rack_group_pool_name = \
            self._create_pool(self.rack_group_id)
        self.rack_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.rack_group_rbd_id = self._create_rbd(self.rack_group_pool_id,
                                                  self.rack_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.rack_group_rbd_id, self.rack_group_pool_id))
        capacity_used_before = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("The previous used capacity is %s" % capacity_used_before)
        self._write_rbd(self.rack_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 6:check osd capacity is changed
        # only in the osd within r/w group
        capacity_used_after = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("Later used capacity is %s" % capacity_used_after)
        # 5% tolerance on the host-group capacity while writing to the
        # rack group; more shrinkage than that means cross-group impact.
        if capacity_used_after < capacity_used_before * 0.95:
            raise exceptions.TestFail(
                "Do r/w in the osd of rack group, "
                "affect the used capacity of host group!")

        # Step 7:Rbd migration: migrate rbd from pool 1 to pool 2
        self._migrate_rbd(self.rack_group_pool_id, self.host_group_pool_id,
                          self.rack_group_rbd_id, self.rack_group_rbd_name)
        self._write_rbd(self.host_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 8:Down one host from one group,
        # and then w/r data in other group
        test_utils.delete_osd(self.rack_group_servers_id[0], self.params)
        self.servers_client.delete_server(self.rack_group_servers_id[0])
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=False)

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        if self.fio_working_path is not None:
            # delete files
            cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath,
                                                  self.fio_version))
            # Kill any leftover fio, then remove tarball and unpacked dir.
            cmd1 = 'pkill fio || true; '
            cmd2 = 'rm -rf %s %s' % (
                os.path.join(self.dstpath, self.fio_version),
                os.path.join(self.dstpath, self.fio_working_path))
            cmd = cmd1 + cmd2
            remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd=cmd,
                cmd_mid=cmd_mid)
        # Both rbds live in the host-group pool at this point (the rack
        # rbd was migrated there in step 7).
        if self.host_group_pool_id and self.host_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.host_group_rbd_id)
        if self.host_group_pool_id and self.rack_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.rack_group_rbd_id)
        if self.host_group_pool_id:
            self.pool_client.delete_pool(self.host_group_pool_id)
        if self.rack_group_pool_id:
            self.pool_client.delete_pool(self.rack_group_pool_id)
Beispiel #14
0
 def __init__(self, params, env):
     """Store test params/env and create the clusters REST client."""
     self.params = params
     self.env = env
     self.body = {}
     self.clusters_client = ClustersClient(params)
     # NOTE(review): left as None here; presumably created later once a
     # cluster id is available -- confirm against the class's setup().
     self.servers_client = None
Beispiel #15
0
class TestDeploy(test.Test):
    """
    Deploy a ceph cluster: create the cluster, add its hosts concurrently,
    point it at a zabbix server and trigger the deployment.

    (The previous docstring mentioned snapshot operations, which this
    class does not touch.)
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.body = {}
        self.clusters_client = ClustersClient(params)
        # Created in setup() once the cluster id is known.
        self.servers_client = None

    def setup(self):
        """Create the cluster and collect 'rest_arg_*' params into self.body."""
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        # NOTE(review): 'not resp and verify_response(...)' can only fail
        # when resp is falsy; presumably 'or' was intended -- kept as-is
        # pending confirmation, since sibling tests repeat the pattern.
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')

        self.params['cluster_id'] = self.cluster_id
        self.groups_client = GroupsClient(self.params)
        self.servers_client = ServersClient(self.params)
        # Strip the 'rest_arg_' prefix so the remainder (servername_1,
        # publicip_1, ...) can be used directly as request fields.
        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v

    def _create_server(self, request_body):
        """Add one server and block until it reaches the 'added' state.

        :param request_body: server creation fields; a default
            parent_bucket is looked up when missing.
        :raises exceptions.TestFail: if the server never reaches 'added'.
        """
        if not request_body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 800)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s" %
                                      request_body['servername'])
        LOG.info('Create server %s successfully!' %
                 body['properties'].get('name'))

    def _deploy_cluster(self):
        """Trigger deployment and wait until the cluster is 'deployed'."""
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(
            self.cluster_id, self.clusters_client, 'deployed',
            int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            # NOTE(review): '%d' assumes the cluster id from the REST body
            # is numeric -- confirm, or '%s' would be safer.
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _configure_zabbix_server(self):
        """Point the cluster at the zabbix server via cephmgmtclient over ssh."""
        ceph_server_ip = self.params.get('ceph_management_url')
        ceph_server_ip = test_utils.get_ip_from_string(ceph_server_ip)
        if not ceph_server_ip:
            msg = "get ceph server ip from management url error."
            logging.error(msg)
            raise exceptions.TestFail(msg)
        ceph_ssh_username = self.params.get('ceph_server_ssh_username', 'root')
        ceph_ssh_password = self.params.get('ceph_server_ssh_password')
        LOG.info("Configuring zabbix server on Ceph server")
        session = remote.RemoteRunner(host=ceph_server_ip,
                                      username=ceph_ssh_username,
                                      password=ceph_ssh_password)
        cmd = 'source ~/localrc; '
        cmd += 'cephmgmtclient update-cluster-conf -c %s -z' % self.cluster_id
        cmd += ' %s -u admin -p zabbix -t 600 -r 10' % self.params.get(
            'zabbix_server_ip')
        logging.info("cmd is:%s" % cmd)
        session.run(cmd)
        session.session.close()

    def test_deploy_cluster_with_multi_hosts(self):
        """
        This test basically performs following steps:
            1. create three hosts
            2. deploy the cluster
        """
        groups = self.groups_client.list_groups()
        parent_bucket = groups[0]['id']
        logging.info("cluster id is %s, parent_bucket id is %s" %
                     (self.cluster_id, parent_bucket))
        # create the hosts concurrently, one thread per 'servername_<i>'
        isbackup = self.body.get('backup_node')
        i = 1
        threads = []
        while self.body.get('servername_%d' % i):
            tmp = 'servername_%d' % i
            servername = self.body.get(tmp, 'cloudtest_server_%d' % i)
            tmp = 'username_%d' % i
            username = self.body.get(tmp, 'root')
            tmp = 'password_%d' % i
            password = self.body.get(tmp, 'lenovo')
            tmp = 'publicip_%d' % i
            publicip = self.body.get(tmp)
            tmp = 'clusterip_%d' % i
            clusterip = self.body.get(tmp)
            tmp = 'managerip_%d' % i
            managerip = self.body.get(tmp)
            create_server_body = {
                'servername': servername,
                'username': username,
                'passwd': password,
                'publicip': publicip,
                'clusterip': clusterip,
                'managerip': managerip,
                'parent_bucket': parent_bucket,
                'backup_node': isbackup
            }
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            i = i + 1
        # waiting for all servers ready
        for t in threads:
            # Daemon threads so a hung server add cannot keep the process
            # alive past the join timeout below.
            t.setDaemon(True)
            t.start()

        for i in range(0, len(threads)):
            try:
                threads[i].join(800)
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are no longer swallowed; failures are
                # still only logged, matching the original best-effort wait.
                logging.exception(
                    'Caught exception waiting for server %d added!' % i)
        logging.info('======start to _configure_zabbix_server=====')
        self._configure_zabbix_server()
        logging.info('======finished to _configure_zabbix_server=====')
        # deploy the cluster
        self._deploy_cluster()
        # Give the freshly deployed cluster time to settle before the
        # framework runs teardown / subsequent tests.
        time.sleep(60)

    def teardown(self):
        """No cleanup: the deployed cluster is intentionally left in place."""
        pass