Example #1
    def setup(self):
        """
        Set up before executing test
        """
        if 'cluster' in self.env:
            self.cluster_id = self.env.get('cluster')
        elif self.params.get('cluster_id'):
            self.cluster_id = int(self.params.get('cluster_id'))
        else:
            raise exceptions.TestSetupFail(
                'Please set cluster_id in config first')

        self.params['cluster_id'] = self.cluster_id
        self.client = ServersClient(self.params)
        self.ipv6 = self.params.get('IPV6', False)
        if not self.ipv6:
            self.controller_ip = self.params.get('ceph_management_url').split(
                ':')[1].strip('/')
        else:
            # the management url wraps an IPv6 address in square brackets
            self.controller_ip = re.findall(
                r"https?://\[(.*)\].*",
                self.params.get('ceph_management_url'),
                flags=0)[0]

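        # Copy every param whose key contains 'rest_arg_' into the request
        # body, using the part after the prefix as the new key.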
        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v
Example #2
 def __init__(self, params, env):
     self.params = params
     self.env = env
     self.cluster_client = ClustersClient(params)
     self.server_client = ServersClient(params)
     self.monitor_client = MonitorsClient(params)
     self.pool_client = PoolsClient(params)
     self.osd_client = OsdClient(params)
     self.dstpath = '/root'
     self.workload_path = data_dir.COMMON_TEST_DIR
     self.fio_version = self.params.get('fio_version')
     self.fio_working_path = self.fio_version[:-len('.tar.gz')]
Example #3
    def __init__(self, params, env):
        self.params = params
        self.clusterconf_client = ClustersConfClient(params)
        self.zabbix_client = ZabbixClient(params)
        self.cluster_client = ClustersClient(params)
        self.server_client = ServersClient(params)
        self.body = {}
        self.env = env
        self.pool_id = None
        self.pool_name = None
        self.recover_item_id = None
        self.statistical_time = self.params.get('statistical_time', 1800)
        self.interval_time = self.params.get('interval_time', 30)
        self.rbds_id = []
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = self.fio_version[:-len('.tar.gz')]
        self.mid_host_ip =\
            self.params.get('ceph_management_url').split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')
        self.end_host_ip = test_utils.get_available_host_ip(self.params)

        self.server_name = None
        self.server_id = None
Example #4
 def __init__(self, params, env):
     self.params = params
     self.client = WarningsClient(params)
     self.server_client = ServersClient(params)
     self.monitor_client = MonitorsClient(params)
     self.osd_client = OsdClient(params)
     self.body = {}
     self.env = env
Example #5
    def setup(self):
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')

        self.params['cluster_id'] = self.cluster_id
        self.groups_client = GroupsClient(self.params)
        self.servers_client = ServersClient(self.params)
        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v
Example #6
def get_server_id_by_name(params, server_name):
    server_client = ServersClient(params)
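    # Query a single page of up to 100 servers.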
    query_server = {'marker': 0, 'pagesize': 100}
    servers = _get_servers(server_client, query_server)
    if not len(servers) > 0:
        LOG.error("No available server found!")
        return None

    for server in servers:
        if server['servername'] in server_name:
            return server['id']
Example #7
 def setup(self):
     """
     Set up before executing test
     """
     LOG.info("Try to create cluster cloudtest_cluster")
     create_cluster = {
         'name': self.params.get('cluster_name', 'cloudtest_cluster'),
         'addr': self.params.get('cluster_addr', 'vm')
     }
     resp = self.clusters_client.create(**create_cluster)
     if not resp and utils.verify_response(self.body, resp):
         raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                        self.body)
     self.cluster_id = resp.body.get('id')
     LOG.info("Created cluster successfully!")
     self.params['cluster_id'] = self.cluster_id
     self.servers_client = ServersClient(self.params)
     self.group_client = GroupsClient(self.params)
     self.pool_client = PoolsClient(self.params)
     self.rbd_client = RbdClient(self.params)
     self.osd_client = OsdClient(self.params)
Example #8
def get_available_host_ip(params):
    server_client = ServersClient(params)
    query_server = {'marker': 0, 'pagesize': 100}
    servers = _get_servers(server_client, query_server)
    if not len(servers) > 0:
        LOG.error("No available server found!")
        return None
    for server in servers:
        if len(server['mons']) == 0:
            continue
        if server['state'] == 3 and server['status'] == 1 \
                and server['mons'][0]['role'] == 'follower':
            return server['publicip']
    return servers[0]['publicip']
Example #9
def get_available_server_info(params, cluster_id):
    tmp = params['cluster_id']
    params['cluster_id'] = cluster_id
    query_server = {'marker': 0, 'pagesize': 100}
    server_client = ServersClient(params)
    params['cluster_id'] = tmp
    servers = _get_servers(server_client, query_server)
    if not len(servers) > 0:
        LOG.error("No available server found!")
        return None
    for server in servers:
        if len(server['mons']) == 0:
            continue
        if server['state'] == 3 and server['status'] == 1:
            return server
    return servers[0]
Example #10
def get_available_server(params):
    vip1_hostname = get_vip1_hostname(params) if params.get(
        'HA_Enable') == 'yes' else ''
    server_client = ServersClient(params)
    query_server = {'marker': 0, 'pagesize': 100}
    servers = _get_servers(server_client, query_server)
    if not len(servers) > 0:
        LOG.error("No available server found!")
        return None
    for server in servers:
        if len(server['mons']) == 0:
            continue
        # the server must be neither the controller nor the HA VIP server
        if server['state'] == 3 and server['status'] == 1 \
                and server['mons'][0]['role'] == 'follower' \
                and server['servername'] not in ['controller', vip1_hostname]:
            return server['id']
    return servers[0]['id']
Example #11
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.cluster_client = ClustersClient(params)
        self.pool_client = PoolsClient(params)
        self.rbd_client = RbdClient(params)
        self.server_client = ServersClient(params)
        self.pool_id_before = None
        self.pool_name_before = None
        self.rbd_name_before = None
        self.pool_id_after = None
        self.pool_name_after = None
        self.rbd_name_after = None
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        LOG.info('Workload path: %s' % self.workload_path)
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = None

        self.target_pool = None
        self.rbd_id = None
        self.server_name = None
        self.server_id = None
Example #12
class TestDeploy(test.Test):
    """
    Module for testing cluster deployment related operations.
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.body = {}
        self.clusters_client = ClustersClient(params)
        self.servers_client = None

    def setup(self):
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')

        self.params['cluster_id'] = self.cluster_id
        self.groups_client = GroupsClient(self.params)
        self.servers_client = ServersClient(self.params)
        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v

    def _create_server(self, request_body):
        if not request_body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 800)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s" %
                                      request_body['servername'])
        LOG.info('Create server %s successfully!' %
                 body['properties'].get('name'))

    def _deploy_cluster(self):
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(
            self.cluster_id, self.clusters_client, 'deployed',
            int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _configure_zabbix_server(self):
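        # Update the cluster's zabbix configuration on the ceph management
        # node over ssh, using the credentials from the test params.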
        ceph_server_ip = self.params.get('ceph_management_url')
        # ceph_server_ip = ceph_server_ip.split(':')[1].strip('/')
        ceph_server_ip = test_utils.get_ip_from_string(ceph_server_ip)
        if not ceph_server_ip:
            msg = "get ceph server ip from management url error."
            logging.error(msg)
            raise exceptions.TestFail(msg)
        ceph_ssh_username = self.params.get('ceph_server_ssh_username', 'root')
        ceph_ssh_password = self.params.get('ceph_server_ssh_password')
        LOG.info("Configuring zabbix server on Ceph server")
        session = remote.RemoteRunner(host=ceph_server_ip,
                                      username=ceph_ssh_username,
                                      password=ceph_ssh_password)
        cmd = 'source ~/localrc; '
        cmd += 'cephmgmtclient update-cluster-conf -c %s -z' % self.cluster_id
        cmd += ' %s -u admin -p zabbix -t 600 -r 10' % self.params.get(
            'zabbix_server_ip')
        logging.info("cmd is:%s" % cmd)
        session.run(cmd)
        session.session.close()

    def test_deploy_cluster_with_multi_hosts(self):
        """
        This test basically performs the following steps:
            1. create three hosts
            2. deploy the cluster
        """
        groups = self.groups_client.list_groups()
        parent_bucket = groups[0]['id']
        logging.info("cluster id is %s, parent_bucket id is %s" %
                     (self.cluster_id, parent_bucket))
        # create three hosts
        isbackup = self.body.get('backup_node')
        i = 1
        threads = []
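        # Build one server-creation thread per host described by the numbered
        # servername_N/username_N/... parameters in the request body.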
        while self.body.get('servername_%d' % i):
            tmp = 'servername_%d' % i
            servername = self.body.get(tmp, 'cloudtest_server_%d' % i)
            tmp = 'username_%d' % i
            username = self.body.get(tmp, 'root')
            tmp = 'password_%d' % i
            password = self.body.get(tmp, 'lenovo')
            tmp = 'publicip_%d' % i
            publicip = self.body.get(tmp)
            tmp = 'clusterip_%d' % i
            clusterip = self.body.get(tmp)
            tmp = 'managerip_%d' % i
            managerip = self.body.get(tmp)
            create_server_body = {
                'servername': servername,
                'username': username,
                'passwd': password,
                'publicip': publicip,
                'clusterip': clusterip,
                'managerip': managerip,
                'parent_bucket': parent_bucket,
                'backup_node': isbackup
            }
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            # self._create_server(create_server_body)
            i = i + 1
        # waiting for all servers ready
        for t in threads:
            t.setDaemon(True)
            t.start()

        for i in range(0, len(threads)):
            try:
                threads[i].join(800)
            except Exception:
                logging.exception(
                    'Caught exception waiting for server %d added!' % i)
        logging.info('====== start _configure_zabbix_server ======')
        self._configure_zabbix_server()
        logging.info('====== finished _configure_zabbix_server ======')
        # deploy the cluster
        self._deploy_cluster()
        time.sleep(60)

    def teardown(self):
        pass
Example #13
class TestServers(test.Test):
    """
    Servers related tests.
    """
    def __init__(self, params, env):
        self.params = params
        self.body = {}
        self.env = env
        self.cluster_id = ""
        self.controller_username = self.params.get('ceph_server_ssh_username')
        self.controller_password = self.params.get('ceph_server_ssh_password')
        self.timeout = int(self.params.get('server_operation_timeout', 900))

    def setup(self):
        """
        Set up before executing test
        """
        if 'cluster' in self.env:
            self.cluster_id = self.env.get('cluster')
        elif self.params.get('cluster_id'):
            self.cluster_id = int(self.params.get('cluster_id'))
        else:
            raise exceptions.TestSetupFail(
                'Please set cluster_id in config first')

        self.params['cluster_id'] = self.cluster_id
        self.client = ServersClient(self.params)
        self.ipv6 = self.params.get('IPV6', False)
        if not self.ipv6:
            self.controller_ip = self.params.get('ceph_management_url').split(
                ':')[1].strip('/')
        else:
            # the management url wraps an IPv6 address in square brackets
            self.controller_ip = re.findall(
                r"https?://\[(.*)\].*",
                self.params.get('ceph_management_url'),
                flags=0)[0]

        for k, v in self.params.items():
            if 'rest_arg_' in k:
                new_key = k.split('rest_arg_')[1]
                self.body[new_key] = v

    def _query_servers(self):
        servers = self.client.query(**self.body)
        if not len(servers) > 0:
            raise exceptions.TestFail("No servers found, "
                                      "query all servers failed")
        for server in servers:
            if self.env.get('server_name') is not None:
                if self.env['server_name'] == server['servername']:
                    self.env['tmp_server_id'] = server['id']
                    break

    def test_query(self):
        # Test query all servers
        self._query_servers()

    def test_create(self):
        """
        Execute the test of creating a Server
        """
        # if there are not enough resources to create a server, skip it
        if not (self.body.get('servername') and self.body.get('publicip')
                and self.body.get('clusterip') and self.body.get('username')
                and self.body.get('password')):
            raise exceptions.TestSkipError("There is not enough resource"
                                           " to create server!")

        if not self.body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            self.body.update({'parent_bucket': parent_id})
        resp_body = self.client.create(**self.body)
        body = resp_body.body
        if not resp_body and utils.verify_response(self.body, resp_body):
            raise exceptions.TestFail("Create server failed: %s" % self.body)

        status = test_utils.wait_for_server_in_status(
            'servername', self.body['servername'], self.client, 'added', 1,
            int(self.params.get('add_host_timeout', 600)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s" %
                                      self.body['servername'])
        LOG.info('Create server %s successfully!' %
                 body['properties'].get('name'))

        self.env['server_name'] = body['properties'].get('name')
        self._query_servers()

    def __get_server_ip(self, server_id):
        query_server = {'marker': 0, 'pagesize': 100}
        servers = self.client.query(**query_server)
        if not len(servers) > 0:
            LOG.error("No available server found!")
            return None
        for server in servers:
            if len(server['mons']) == 0:
                continue
            if server['id'] == server_id:
                return server['publicip']

        return None

    def __wait_for_server_in_status(self, host_ip, reachable=True):
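        # Ping the host from the controller, polling until the expected
        # reachability state is reported or the timeout expires.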
        end_time = time.time() + float(self.timeout)

        while time.time() < end_time:
            output = remote.ping_host(self.controller_ip,
                                      self.controller_username,
                                      self.controller_password,
                                      host_ip,
                                      reachable,
                                      ipv6=self.ipv6)
            if output:
                return output

            time.sleep(3)

        return None

    def __check_vm_server_restart(self, server_id):
        server_ip = self.__get_server_ip(server_id)
        LOG.info("Server ip: %s" % server_ip)
        if not server_ip:
            raise exceptions.TestFail("Cannot get server ip by server id %s!" %
                                      server_id)

        # wait for host unreachable
        self.__wait_for_server_in_status(server_ip, False)

        # wait for host reachable
        self.__wait_for_server_in_status(server_ip, True)

    def test_server_operation(self):
        server_ops = self.params.get('server_operation')
        server_id = test_utils.get_available_server(self.params)
        if not server_id:
            raise exceptions.TestSetupFail('No available server found!')

        if server_ops == 'stop_maintenance':
            server_id = self.env.get('maintenance_server_id')
            if not server_id:
                raise exceptions.TestSkipError("No host needs "
                                               "to stop maintenance!")
        LOG.info("Try to %s server '%s' on cluster %s" %
                 (server_ops, server_id, self.cluster_id))
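        # expected state/status after the operation, verified below via
        # wait_for_server_in_status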
        state = None
        _status = 1
        if server_ops == 'start':
            state = 'active'
            _status = 1
            self.client.start_server(server_id)

        if server_ops == 'stop':
            state = 'active'
            _status = 0
            self.client.stop_server(server_id)

        if server_ops == 'restart':
            state = 'active'
            _status = 0
            self.client.restart_server(server_id)

        if server_ops == 'start_maintenance':
            state = 'maintenance'
            _status = 1
            self.client.start_maintenance(server_id)
            self.env['maintenance_server_id'] = server_id

        if server_ops == 'stop_maintenance':
            state = 'active'
            _status = 1
            self.client.stop_maintenance(server_id)

        # verify server status
        if server_ops == 'restart':
            node_info_client = NodeInfoClient(self.params)
            body_query = node_info_client.query_phydisks(
                self.cluster_id, server_id)
            if len(body_query) > 0:
                if body_query[0].get('location_led') == -1:
                    LOG.info("Testing on vm environment!")
                    self.__check_vm_server_restart(server_id)
                    return
                else:
                    LOG.info("Testing on physical environment!")
                    status = test_utils.wait_for_server_in_status(
                        'id', server_id, self.client, state, _status,
                        self.timeout)
                    _status = 1
                    if not status:
                        raise exceptions.TestFail("Failed to %s server %s" %
                                                  (server_ops, server_id))

        status = test_utils.wait_for_server_in_status('id', server_id,
                                                      self.client, state,
                                                      _status, self.timeout)
        if not status:
            raise exceptions.TestFail("Failed to %s server %s" %
                                      (server_ops, server_id))
        time.sleep(60)

    def test_delete(self):
        """
        Test deletion of the specified server
        """
        server_id = self.env.get('tmp_server_id')
        if not server_id:
            raise exceptions.TestSkipError("There is not enough server "
                                           "can be deleted!")

        self.client.delete_server(server_id)
        del self.env['tmp_server_id']

    def test_get_server_disks(self):
        """
        Test getting the disks of the specified server
        """
        if self.params.get('server_id'):
            server_id = int(self.params.get('server_id'))
        else:
            server_id = test_utils.get_available_server(self.params)
        self.client.get_server_disks(server_id)

    def test_get_server_nics(self):
        """
        Test getting the nics of the specified server
        """
        if self.params.get('server_id'):
            server_id = int(self.params.get('server_id'))
        else:
            server_id = test_utils.get_available_server(self.params)
        self.client.get_server_nics(server_id)

    def test_add_cephed_server(self):
        """
        Execute the test of adding a server that already has ceph deployed
        """
        resp = self.client.add_cephed_server(**self.body)
        if not len(resp) > 0:
            raise exceptions.TestFail("Failed to add cephed server!")

    def test_get_server_detail(self):
        """
        Test getting all the details of the specified server
        """
        if self.params.get('server_id'):
            server_id = int(self.params.get('server_id'))
        else:
            server_id = test_utils.get_available_server(self.params)
        resp = self.client.get_server_detail(server_id)
        if not len(resp) > 0:
            raise exceptions.TestFail(
                "Failed to get server detail information.")

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        pass
Example #14
class ClusterMaintenance(test.Test):
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.cluster_client = ClustersClient(params)
        self.server_client = ServersClient(params)
        self.monitor_client = MonitorsClient(params)
        self.pool_client = PoolsClient(params)
        self.osd_client = OsdClient(params)
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = self.fio_version[:-len('.tar.gz')]

    def setup(self):
        ceph_server_ip = self.params.get('ceph_management_url')
        self.mid_host_ip = ceph_server_ip.split(':')[1].strip('/')
        self.cluster_id = self.params.get('cluster_id')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')

        self.end_host_ip = test_utils.get_available_host_ip(self.params)

    def test(self):
        """
        1. start maintenance
        2. check osd, mon and agent status
        3. run fio and record osd capacity
        4. wait 300s, then stop maintenance
        5. run step 2 again
        6. run fio again and compare osd capacity
        """
        self.__copy_file()
        self.__get_available_server()
        self.__start_maintenance()
        status = self.__wait_for_osd_in_status('down')
        if not status:
            raise exceptions.TestFail(
                'Osd status should be down, please check!')
        time.sleep(10)
        self.__check_monitor_status(status='inactive')
        self.__check_service_status(cmd='systemctl status sds-agent',
                                    pat='Active: (.*)',
                                    service_type='agent',
                                    status='dead')
        self.__get_pool_name_and_id()
        self.__create_rbd()
        self.__write_rbd(flag=True)
        osd_dict_before = self.__get_osd_capacity()
        LOG.info('Begin to sleep 300s ...')
        time.sleep(300)

        self.__stop_maintenance()
        status = self.__wait_for_osd_in_status(status='up')
        if not status:
            raise exceptions.TestFail('Osd status should be up, please check!')
        time.sleep(10)
        self.__check_monitor_status(status='active')
        self.__check_service_status(cmd='systemctl status sds-agent',
                                    pat='Active: (.*)',
                                    service_type='agent',
                                    status='running')
        self.__create_rbd()
        self.__write_rbd()
        osd_dict_after = self.__get_osd_capacity()
        self.__check_osd_capacity(osd_dict_before, osd_dict_after)

    def __copy_file(self):
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

    def __get_osd_capacity(self):
        """
        Get osd capacity via 'ceph osd df'.
        """
        osd_dict = {}
        stdout_msg = remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd='ceph osd df',
            timeout=1000)
        stdout_msg = stdout_msg.strip()
        msg_list = stdout_msg.split('\n')
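        # Parse the 'ceph osd df' table: column 0 is the osd id and column 6
        # is taken as the AVAIL value compared later in __check_osd_capacity.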
        for osd in self.osd_list:
            osd_id = osd.get('osdId')
            for msg in msg_list:
                msg = msg.strip()
                msg = msg.split()
                if msg[0].isdigit() and int(msg[0]) == osd_id:
                    osd_dict[osd_id] = float(msg[6])
        return osd_dict

    @staticmethod
    def __check_osd_capacity(osd_dict_before, osd_dict_after):
        """
        Check that osds can still be used after host maintenance.
        :param osd_dict_before: osd capacity on this host
               before running fio.
        :param osd_dict_after: osd capacity on this host
               after running fio.
        """
        for key in osd_dict_before.keys():
            if osd_dict_after[key] > osd_dict_before[key]:
                raise exceptions.TestFail('Osd AVAIL increased!')

    def __get_available_server(self):
        self.server_id = test_utils.get_available_server(self.params)

    def __start_maintenance(self):
        LOG.info('Start host maintenance...')
        self.server_client.start_maintenance(self.server_id)

    def __stop_maintenance(self):
        LOG.info('Stop host maintenance...')
        self.server_client.stop_maintenance(self.server_id)

    def __get_pool_name_and_id(self):
        pools = self.pool_client.query()
        if not len(pools):
            raise exceptions.TestSetupFail('No pool found!')
        self.pool_id = pools[0]['id']
        self.pool_name = pools[0]['name']

    def __create_rbd(self):
        resp = test_utils.create_rbd_with_capacity(self.pool_id, self.params,
                                                   RBD_CAPACITY, True)
        self.rbd_id = resp.get('id')
        self.rbd_name = resp.get('name')

    def __check_osd_status(self, status):
        LOG.info('Check osd status ...')
        resp = self.osd_client.get_osd_capacity(self.server_id)
        self.osd_list = resp['osds']
        for i in range(len(self.osd_list)):
            osd = self.osd_list[i]
            osd_name = osd['osdName']
            if osd.get('osdStatus') not in status:
                raise exceptions.TestFail(
                    'Osd %s status error(status: %s), '
                    'status should be %s' %
                    (osd_name, osd.get('osdStatus'), status))
        LOG.info('Check osd status pass !')

    def __wait_for_osd_in_status(self, status):
        def is_in_status():
            resp = self.osd_client.get_osd_capacity(self.server_id)
            self.osd_list = resp['osds']
            for i in range(len(self.osd_list)):
                osd = self.osd_list[i]
                if osd['osdStatus'] not in status:
                    return False
            return True

        return utils_misc.wait_for(is_in_status,
                                   timeout=360,
                                   first=0,
                                   step=30,
                                   text='Waiting for osd in status!')

    def __check_monitor_status(self, status):
        LOG.info('Check monitor status ...')
        resp = self.monitor_client.query(self.cluster_id, self.server_id)
        if len(resp) == 0:
            raise exceptions.TestFail('No monitor on server %s.' %
                                      self.server_id)
        if resp[0]['state'] not in status:
            raise exceptions.TestFail('Monitor state should be %s not %s' %
                                      (status, resp[0]['state']))

    def __check_service_status(self, cmd, pat, service_type, status=None):
        stdout_msg = remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)
        result = re.findall(pat, stdout_msg)
        if 'agent' in service_type:
            if len(result) != 0:
                if status in result[0]:
                    return
                raise exceptions.TestFail('Agent status error !')
        else:
            if len(result) != 0:
                raise exceptions.TestFail('Ceph goes to recovery mode!')

    def __write_rbd(self, flag=False):
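        # Build an fio command that writes 1M to the rbd image through the
        # rbd ioengine; when flag is True the fio tarball is extracted first.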
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin '
        cmd3 = '-pool=%s -rw=write -bs=1M -iodepth=128 -numjobs=1 -direct=1 ' % \
               self.pool_name
        cmd4 = '-size=1M -group_reporting -rbdname=%s -name=mytest' % \
               self.rbd_name
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def teardown(self):
        # delete files
        cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath, self.fio_version))
        cmd1 = 'pkill fio || true; '
        cmd2 = 'rm -rf %s %s' % (os.path.join(self.dstpath, self.fio_version),
                                 os.path.join(self.dstpath,
                                              self.fio_working_path))
        cmd = cmd1 + cmd2
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            cmd_mid=cmd_mid)
Example #15
class TestGroup(test.Test):
    """
    Test that groups can isolate the data io within their customized domains
    """
    def __init__(self, params, env):
        self.params = params
        self.clusters_client = ClustersClient(params)
        self.body = {}
        self.env = env
        self.cluster_id = None
        self.host_group_name = 'host_group_' \
                               + utils_misc.generate_random_string(6)
        self.host_group_id = None
        self.host_group_pool_id = None
        self.host_group_pool_name = None
        self.host_group_rbd_id = None
        self.host_group_rbd_name = None
        self.host_group_servers_id = []
        self.rack_group_name = 'rack_group_' \
                               + utils_misc.generate_random_string(6)
        self.rack_group_id = None
        self.rack_group_pool_id = None
        self.rack_group_pool_name = None
        self.rack_group_rbd_id = None
        self.rack_group_rbd_name = None
        self.rack_group_servers_id = []
        self.dstpath = '/root'
        self.workload_path = data_dir.CEPH_API_SCENARIOS_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = self.fio_version[:-len('.tar.gz')]
        self.mid_host_ip = \
            self.params.get('ceph_management_url').split(':')[1].strip('/')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')
        self.rw = self.params.get('rw', 'randrw')
        self.bs = self.params.get('bs', '8k')
        self.iodepth = self.params.get('iodepth', 128)
        self.runtime = self.params.get('runtime', 120)
        self.rwmixread = self.params.get('rwmixread', 70)
        self.end_host_ip = None

    def setup(self):
        """
        Set up before executing test
        """
        LOG.info("Try to create cluster cloudtest_cluster")
        create_cluster = {
            'name': self.params.get('cluster_name', 'cloudtest_cluster'),
            'addr': self.params.get('cluster_addr', 'vm')
        }
        resp = self.clusters_client.create(**create_cluster)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestSetupFail("Create cluster failed: %s" %
                                           self.body)
        self.cluster_id = resp.body.get('id')
        LOG.info("Created cluster successfully!")
        self.params['cluster_id'] = self.cluster_id
        self.servers_client = ServersClient(self.params)
        self.group_client = GroupsClient(self.params)
        self.pool_client = PoolsClient(self.params)
        self.rbd_client = RbdClient(self.params)
        self.osd_client = OsdClient(self.params)

    def _copy_fio_package_to_host(self):
        self.end_host_ip = test_utils.get_available_host_ip(self.params)
        self.fio_working_path = self.fio_version[:-len('.tar.gz')]
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)

    def _write_rbd(self, pool_name, rbd_name, flag=False):
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin -pool=%s ' % \
               pool_name
        cmd3 = '-rw=%s -rwmixread=%s -bs=%s -iodepth=%s -numjobs=1 -direct=1 ' % \
               (self.rw, self.rwmixread, self.bs, self.iodepth)
        cmd4 = '-runtime=%s -group_reporting -rbdname=%s -name=mytest' % \
               (self.runtime, rbd_name)
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        LOG.info("cmd = %s" % cmd)

        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def _create_group(self, name, leaf_firstn):
        group_body = {'name': name, 'max_size': 10, 'leaf_firstn': leaf_firstn}
        resp_body = self.group_client.create_group(**group_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create group policy failed")
        LOG.info("Created group '%s' with id: %s" % (body['name'], body['id']))
        return body['id']

    def _create_bucket(self, group_id):
        create_body = {
            'name': 'cloudtest_bucket_' + utils_misc.generate_random_string(6),
            'type': 'rack'
        }
        resp_body = self.group_client.create_bucket(group_id, **create_body)
        body = resp_body.body
        if 'id' not in body:
            raise exceptions.TestFail("Create bucket failed")
        LOG.info("Created bucket '%s' with id: %s" %
                 (body['name'], body['id']))
        return body['id']

    def _create_server(self, request_body):
        if not request_body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 600)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s" %
                                      request_body['servername'])
        LOG.info('Create server %s successfully!' %
                 body['properties'].get('name'))

    def _add_three_hosts(self, kwargs):
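        # Collect the host parameters that share the given prefix (kwargs)
        # and add each host in its own thread.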
        body = {}
        for k, v in self.params.items():
            if kwargs in k:
                new_key = k.split(kwargs)[1]
                body[new_key] = v
        LOG.info("body = %s" % body)
        i = 1
        threads = []
        while body.get('servername_%d' % i):
            tmp = 'servername_%d' % i
            servername = body.get(tmp, 'cloudtest_server_%d' % i)
            tmp = 'username_%d' % i
            username = body.get(tmp, 'root')
            tmp = 'password_%d' % i
            password = body.get(tmp, 'lenovo')
            tmp = 'publicip_%d' % i
            publicip = body.get(tmp)
            tmp = 'clusterip_%d' % i
            clusterip = body.get(tmp)
            tmp = 'parent_bucket_%d' % i
            parent_bucket = body.get(tmp)
            create_server_body = {
                'servername': servername,
                'username': username,
                'passwd': password,
                'publicip': publicip,
                'clusterip': clusterip,
                'parent_bucket': parent_bucket
            }
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            i = i + 1

        # waiting for all servers ready
        for t in threads:
            t.setDaemon(True)
            t.start()

        for i in range(0, len(threads)):
            try:
                threads[i].join(600)
            except Exception as details:
                LOG.exception(
                    'Caught exception waiting for server %d added : %s' %
                    (i, details))

    def _deploy_cluster(self):
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(
            self.cluster_id, self.clusters_client, 'deployed',
            int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _create_pool(self, group_id):
        pool_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        LOG.info("Try to create pool %s" % pool_name)
        create_pool = {
            'name': pool_name,
            'size': self.params.get('pool_size', 3),
            'group_id': group_id,
            'pg_num': self.params.get('pg_num', 128)
        }
        resp = self.pool_client.create(**create_pool)
        status = self._wait_for_pool_create(pool_name)
        if not status:
            raise exceptions.TestFail('Failed to create pool %s' % pool_name)
        LOG.info('Create pool %s successfully !' % pool_name)
        pool_id = resp.body['properties']['context']['pool_id']
        return pool_id, pool_name

    def _wait_for_pool_create(self, pool_name, timeout=1000):
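        # Poll the pool list until the new pool appears with the expected
        # state, size and pg_num, or the timeout expires.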
        def is_pool_create():
            resp = self.pool_client.query()
            for i in range(len(resp)):
                if resp[i]['name'] == pool_name \
                        and resp[i]['state'] == 1 \
                        and resp[i]['size'] == 3 \
                        and resp[i]['pg_num'] == 128:
                    return True
            return False

        return utils_misc.wait_for(is_pool_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for pool %s create.' %
                                   pool_name)

    def _create_rbd(self, pool_id, rbd_name):
        LOG.info("Try to create rbd %s" % rbd_name)
        create_rbd = {
            'name': rbd_name,
            'object_size': self.params.get('object_size', 10),
            'capacity': self.params.get('capacity', 1024 * 1024 * 1024)
        }
        self.rbd_client.create(pool_id, **create_rbd)
        status = self._wait_for_rbd_in_status(pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to create rbd %s!' % rbd_name)
        resp = self.rbd_client.query(pool_id)
        for i in range(len(resp)):
            if resp[i]['name'] == rbd_name:
                return resp[i]['id']
        raise exceptions.TestError('Create rbd %s failed' % rbd_name)

    def _wait_for_rbd_in_status(self, pool_id, rbd_name, status, timeout=300):
        status_map = {'copying': 6, 'ready': 0}

        def is_rbd_create():
            resp = self.rbd_client.query(pool_id)
            for i in range(len(resp)):
                if resp[i]['name'] == rbd_name:
                    if resp[i]['status'] == status_map[status]:
                        return True
            return False

        return utils_misc.wait_for(is_rbd_create,
                                   timeout,
                                   first=0,
                                   step=5,
                                   text='Waiting for rbd %s create.' %
                                   rbd_name)

    def _migrate_rbd(self, src_pool_id, des_pool_id, rbd_id, rbd_name):
        LOG.info("Try to migrate rbd %s" % rbd_name)
        move_rbd = {'target_pool': des_pool_id}
        resp = self.rbd_client.migrate(src_pool_id, rbd_id, **move_rbd)
        if not resp and utils.verify_response(self.body, resp):
            raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
        status = self._wait_for_rbd_in_status(des_pool_id, rbd_name, 'ready')
        if not status:
            raise exceptions.TestFail('Failed to migrate rbd %s!' % rbd_name)
        LOG.info('Migrate rbd %s successfully !' % rbd_name)

    def _get_servers_id(self):
        query_server = {'marker': 0, 'pagesize': 100}
        servers = self.servers_client.query(**query_server)
        if not len(servers) > 0:
            raise exceptions.TestFail("No available server found!")
        for server in servers:
            if server['group']['id'] == str(self.host_group_id):
                self.host_group_servers_id.append(server['id'])
            elif server['group']['id'] == str(self.rack_group_id):
                self.rack_group_servers_id.append(server['id'])
        LOG.info('Host group servers: %s' % self.host_group_servers_id)
        LOG.info('Rack group servers: %s' % self.rack_group_servers_id)

    def _get_osd_capacity(self, server_id):
        resp = self.osd_client.get_osd_capacity(server_id)
        if not len(resp) > 0:
            raise exceptions.TestFail("Query osd capacity failed")
        return resp.get('capacityUsed')

    def _get_osd_capacity_within_group(self, group_tag):
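        # Sum the used osd capacity over every server in the given group.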
        total_capacity_used = 0
        if group_tag in 'host_group_':
            for server_id in self.host_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        elif group_tag in 'rack_group_':
            for server_id in self.rack_group_servers_id:
                total_capacity_used = total_capacity_used + \
                                      self._get_osd_capacity(server_id)
        return total_capacity_used

    def test(self):
        """
        1. Create host group with host level, and add 3 hosts to this group
        2. Create host group with rack level, and add 3 other hosts to this group
        3. Deploy cluster
        4. Create pool in host group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        5. Create pool in rack group, create rbd in this pool,
        and execute FIO r/w, check r/w works ok
        6. check osd capacity is changed only in the osd within r/w group
        7. Rbd migration: migrate rbd from pool 1 to pool 2,
        and execute FIO r/w, check r/w works ok
        8. Down one host from one group, and then w/r data in other group
        check data r/w in other group works ok
        """
        # Step 1: Create host group with host level, and add 3 hosts
        self.host_group_id = self._create_group(self.host_group_name, 'host')
        host_bucket_id = self._create_bucket(self.host_group_id)
        self.params['host_group_parent_bucket_1'] = host_bucket_id
        self.params['host_group_parent_bucket_2'] = host_bucket_id
        self.params['host_group_parent_bucket_3'] = host_bucket_id
        self._add_three_hosts("host_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.host_group_name)

        # Step 2: Create host group with rack level, and add 3 hosts
        self.rack_group_id = self._create_group(self.rack_group_name, 'rack')
        rack_bucket_id_1 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_2 = self._create_bucket(self.rack_group_id)
        rack_bucket_id_3 = self._create_bucket(self.rack_group_id)
        self.params['rack_group_parent_bucket_1'] = rack_bucket_id_1
        self.params['rack_group_parent_bucket_2'] = rack_bucket_id_2
        self.params['rack_group_parent_bucket_3'] = rack_bucket_id_3
        self._add_three_hosts("rack_group_")
        LOG.info("Added 3 hosts to group %s successfully!" %
                 self.rack_group_name)

        # Step 3: deploy cluster
        self._deploy_cluster()
        self._get_servers_id()

        # Step 4:create pool in host group, rbd, do FIO r/w, check r/w works ok
        self._copy_fio_package_to_host()
        self.host_group_pool_id, self.host_group_pool_name = \
            self._create_pool(self.host_group_id)
        self.host_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.host_group_rbd_id = self._create_rbd(self.host_group_pool_id,
                                                  self.host_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.host_group_rbd_name, self.host_group_pool_id))
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=True)

        # Step 5:create pool in rack group, rbd, do FIO r/w, check r/w works ok
        self.rack_group_pool_id, self.rack_group_pool_name = \
            self._create_pool(self.rack_group_id)
        self.rack_group_rbd_name = 'cloudtest_' \
                                   + utils_misc.generate_random_string(6)
        self.rack_group_rbd_id = self._create_rbd(self.rack_group_pool_id,
                                                  self.rack_group_rbd_name)
        LOG.info("Create rbd %s in pool %s" %
                 (self.rack_group_rbd_id, self.rack_group_pool_id))
        capacity_used_before = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("The previous used capacity is %s" % capacity_used_before)
        self._write_rbd(self.rack_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 6:check osd capacity is changed
        # only in the osd within r/w group
        capacity_used_after = self._get_osd_capacity_within_group(
            'host_group_')
        LOG.info("Later used capacity is %s" % capacity_used_after)
        if capacity_used_after < capacity_used_before * 0.95:
            raise exceptions.TestFail(
                "R/W on the rack group osds affected the "
                "used capacity of the host group!")

        # Step 7:Rbd migration: migrate rbd from pool 1 to pool 2
        self._migrate_rbd(self.rack_group_pool_id, self.host_group_pool_id,
                          self.rack_group_rbd_id, self.rack_group_rbd_name)
        self._write_rbd(self.host_group_pool_name,
                        self.rack_group_rbd_name,
                        flag=False)

        # Step 8:Down one host from one group,
        # and then w/r data in other group
        test_utils.delete_osd(self.rack_group_servers_id[0], self.params)
        self.servers_client.delete_server(self.rack_group_servers_id[0])
        self._write_rbd(self.host_group_pool_name,
                        self.host_group_rbd_name,
                        flag=False)

    def teardown(self):
        """
        Some clean up work will be done here.
        """
        if self.fio_working_path is not None:
            # delete files
            cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath,
                                                  self.fio_version))
            cmd1 = 'pkill fio || true; '
            cmd2 = 'rm -rf %s %s' % (
                os.path.join(self.dstpath, self.fio_version),
                os.path.join(self.dstpath, self.fio_working_path))
            cmd = cmd1 + cmd2
            remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd=cmd,
                cmd_mid=cmd_mid)
        if self.host_group_pool_id and self.host_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.host_group_rbd_id)
        if self.host_group_pool_id and self.rack_group_rbd_id:
            self.rbd_client.delete_rbd(self.host_group_pool_id,
                                       self.rack_group_rbd_id)
        if self.host_group_pool_id:
            self.pool_client.delete_pool(self.host_group_pool_id)
        if self.rack_group_pool_id:
            self.pool_client.delete_pool(self.rack_group_pool_id)
Example #16
    def setup(self):
        """
        Set up before executing test
        1. to check if two clusters are available
        2. create one pool: testpool
        3. configure remote backup in the testpool
        """
        # check if two clusters are available
        clusters = test_utils.get_available_clusters(self.params)
        if len(clusters) < 1:
            raise exceptions.TestSetupFail(
                'There are not enough clusters!')
        elif len(clusters) < 2:
            LOG.info('There are not enough clusters, try to create cluster!')
            self.cluster_id = self._create_cluster()
            self.params['cluster_id'] = self.cluster_id
            self.servers_client = ServersClient(self.params)
            for k, v in self.params.items():
                if 'rest_arg_cluster2_' in k:
                    new_key = k.split('rest_arg_cluster2_')[1]
                    self.create_servers_body[new_key] = v
            self._add_three_hosts()
            self._deploy_cluster()
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) < 2:
                raise exceptions.TestSetupFail(
                    'There are not enough clusters!')

        self.cluster_id = clusters[1]['id']
        self.params['cluster_id'] = self.cluster_id
        for cluster in clusters:
            if cluster['id'] != self.cluster_id:
                self.des_cluster_id = cluster['id']
                self.body['des_cluster_id'] = self.des_cluster_id
                break
        src_host = test_utils.get_available_server_info(self.params,
                                                        self.cluster_id)
        self.src_ip = src_host['publicip']
        self.body['src_ip'] = self.src_ip
        self.src_host_id = src_host['id']
        self.body['src_host_id'] = self.src_host_id
        des_host = test_utils.get_available_server_info(self.params,
                                                        self.des_cluster_id)
        self.des_ip = des_host['publicip']
        self.body['des_ip'] = self.des_ip
        self.des_host_id = des_host['id']
        self.body['des_host_id'] = self.des_host_id

        if self.params.get('pool_id'):
            self.pool_id = self.params.get('pool_id')
        else:
            self.pool_id = test_utils.create_pool(self.params)
            pool_client = PoolsClient(self.params)
            if not test_utils.wait_for_pool_in_state(self.pool_id, pool_client,
                                                     'ready'):
                raise exceptions.TestSetupFail("Failed to creating test pool!")
        self.params['pool_id'] = self.pool_id

        # configure remote backup in testpool
        LOG.info("Try to configure remote backup in pool %s : %s"
                 % (self.pool_id, self.body))
        self.client = RemoteBackupClient(self.params)
        self.client.configure_rbpolicy(**self.body)

        # other pre-conditions
        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password',
                                                'lenovo')
        self.initiator_ip = self.params.get('initiator_ip', self.src_ip)
        self.initiator_username = self.params.get('ceph_node_ssh_username',
                                                  'root')
        self.initiator_password = self.params.get('ceph_node_ssh_password',
                                                  'lenovo')
        # create iscsi client
        self.iscsi_client = ISCSIClient(self.params)
class TestRBDRemoteBackup(test.Test):
    """
    Module for testing RBD remote backup scenarios
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        self.body = {}
        self.create_servers_body = {}
        self.cluster_id = None
        self.pool_id = None
        self.target_id = None
        self.rbd_id = None
        self.clusters_client = ClustersClient(self.params)

    def setup(self):
        """
        Set up before executing test
        1. check that two clusters are available
        2. create one pool: testpool
        3. configure remote backup in the testpool
        """
        # check if two clusters are available
        clusters = test_utils.get_available_clusters(self.params)
        if len(clusters) < 1:
            raise exceptions.TestSetupFail(
                'There are not enough clusters!')
        elif len(clusters) < 2:
            LOG.info('There are not enough clusters, try to create cluster!')
            self.cluster_id = self._create_cluster()
            self.params['cluster_id'] = self.cluster_id
            self.servers_client = ServersClient(self.params)
            for k, v in self.params.items():
                if 'rest_arg_cluster2_' in k:
                    new_key = k.split('rest_arg_cluster2_')[1]
                    self.create_servers_body[new_key] = v
            self._add_three_hosts()
            self._deploy_cluster()
            clusters = test_utils.get_available_clusters(self.params)
            if len(clusters) < 2:
                raise exceptions.TestSetupFail(
                    'There are not enough clusters!')

        self.cluster_id = clusters[1]['id']
        self.params['cluster_id'] = self.cluster_id
        for cluster in clusters:
            if cluster['id'] != self.cluster_id:
                self.des_cluster_id = cluster['id']
                self.body['des_cluster_id'] = self.des_cluster_id
                break
        src_host = test_utils.get_available_server_info(self.params,
                                                        self.cluster_id)
        self.src_ip = src_host['publicip']
        self.body['src_ip'] = self.src_ip
        self.src_host_id = src_host['id']
        self.body['src_host_id'] = self.src_host_id
        des_host = test_utils.get_available_server_info(self.params,
                                                        self.des_cluster_id)
        self.des_ip = des_host['publicip']
        self.body['des_ip'] = self.des_ip
        self.des_host_id = des_host['id']
        self.body['des_host_id'] = self.des_host_id

        if self.params.get('pool_id'):
            self.pool_id = self.params.get('pool_id')
        else:
            self.pool_id = test_utils.create_pool(self.params)
            pool_client = PoolsClient(self.params)
            if not test_utils.wait_for_pool_in_state(self.pool_id, pool_client,
                                                     'ready'):
                raise exceptions.TestSetupFail("Failed to creating test pool!")
        self.params['pool_id'] = self.pool_id

        # configure remote backup in testpool
        LOG.info("Try to configure remote backup in pool %s : %s"
                 % (self.pool_id, self.body))
        self.client = RemoteBackupClient(self.params)
        self.client.configure_rbpolicy(**self.body)

        # other pre-conditions
        self.control_server_ip = self.params.get('ceph_management_url')
        self.control_server_ip = self.control_server_ip.split(':')[1].strip(
            '/')
        self.control_username = self.params.get('ceph_server_ssh_username',
                                                'root')
        self.control_password = self.params.get('ceph_server_ssh_password',
                                                'lenovo')
        self.initiator_ip = self.params.get('initiator_ip', self.src_ip)
        self.initiator_username = self.params.get('ceph_node_ssh_username',
                                                  'root')
        self.initiator_password = self.params.get('ceph_node_ssh_password',
                                                  'lenovo')
        # create iscsi client
        self.iscsi_client = ISCSIClient(self.params)

    def _create_cluster(self):
        create_cluster = {'name': self.params.get('cluster_name',
                                                  'cloudtest_cluster_2'),
                          'addr': self.params.get('cluster_addr', 'vm')}
        resp = self.clusters_client.create(**create_cluster)
        if not resp and utils.verify_response(create_cluster, resp):
            raise exceptions.TestSetupFail(
                "Create cluster failed: %s" % create_cluster)
        return resp.body.get('id')

    def _create_server(self, request_body):
        if not request_body.get('parent_bucket'):
            group_id, parent_id = \
                test_utils.get_available_group_bucket(self.params)
            request_body.update({'parent_bucket': parent_id})
        resp_body = self.servers_client.create(**request_body)
        body = resp_body.body
        status = test_utils.wait_for_server_in_status(
            'servername', request_body['servername'], self.servers_client,
            'added', 1, int(self.params.get('add_host_timeout', 600)))
        if not status:
            raise exceptions.TestFail("Failed to add server %s"
                                      % request_body['servername'])
        LOG.info('Create server %s successfully!'
                 % body['properties'].get('name'))

    def _add_three_hosts(self):
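        # Read per-host parameters (servername_N, username_N, password_N,
        # publicip_N, clusterip_N) from the cluster2 REST args and add each
        # host in its own thread so the hosts are added in parallel.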
        parent_bucket = self.create_servers_body.get('parent_bucket')
        i = 1
        threads = []
        while self.create_servers_body.get('servername_%d' % i):
            tmp = 'servername_%d' % i
            servername = self.create_servers_body.get(
                tmp, 'cloudtest_server_%d' % i)
            tmp = 'username_%d' % i
            username = self.create_servers_body.get(tmp, 'root')
            tmp = 'password_%d' % i
            password = self.create_servers_body.get(tmp, 'lenovo')
            tmp = 'publicip_%d' % i
            publicip = self.create_servers_body.get(tmp)
            tmp = 'clusterip_%d' % i
            clusterip = self.create_servers_body.get(tmp)
            create_server_body = {'servername': servername,
                                  'username': username,
                                  'passwd': password,
                                  'publicip': publicip,
                                  'clusterip': clusterip,
                                  'parent_bucket': parent_bucket}
            t = threading.Thread(target=self._create_server,
                                 args=[create_server_body])
            threads.append(t)
            i = i + 1

        # waiting for all servers ready
        for t in threads:
            t.setDaemon(True)
            t.start()

        for i in range(0, len(threads)):
            try:
                threads[i].join(600)
            except Exception as details:
                LOG.exception('Caught exception waiting for server %d added : %s'
                              % (i, details))

    def _deploy_cluster(self):
        self.clusters_client.deploy_cluster(self.cluster_id)
        status = test_utils.wait_for_cluster_in_status(self.cluster_id,
                                                       self.clusters_client,
                                                       'deployed',
                           int(self.params.get('deploy_host_timeout', 900)))
        if not status:
            raise exceptions.TestFail("Failed to deploy cluster %d" %
                                      self.cluster_id)
        LOG.info("Deploy cluster %d successfully!" % self.cluster_id)

    def _start_rbtask(self, rbd_id):
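        # Kick off a remote backup task for the given rbd and return the
        # task id; the short sleep gives the backend time to register it.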
        rbtask_body = {}
        rbtask_body['rbd_id'] = rbd_id
        resp_body = self.client.start_rbtask(**rbtask_body)
        body = resp_body.body
        LOG.info("Create remote backup %s for rbd %s"
                 % (body.get('id'), rbd_id))
        time.sleep(30)
        return body.get('id')

    def _create_iscsi_target(self):
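        # Create an iscsi target with a random name, bound to the initiator
        # ip, and return its target_id.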
        self.iscsi_target_name = "cloudtest" + \
                                 utils.utils_misc.generate_random_string(6)
        create_body = {'initiator_ips': self.initiator_ip,
                       'target_name': self.iscsi_target_name,
                       'multipath': self.params.get('multipath', 3)}

        resp = self.iscsi_client.create(**create_body)
        if not resp and utils.verify_response(create_body, resp):
            raise exceptions.TestFail("Create target failed: %s "
                                      % create_body)
        return resp.body['target_id']

    def _create_iscsi_lun(self, target_id, rbd_id):
        create_body = {'target_id': target_id,
                       'pool_id': self.pool_id,
                       'rbd_id': rbd_id}
        resp = self.iscsi_client.add_lun(**create_body)
        return resp.body['lun_id']

    def _delete_iscsi_lun(self, target_id, lun_id):
        body = {
            'target_id': target_id,
            'lun_id': lun_id}

        self.iscsi_client.delete_lun(**body)

    def _delete_target(self, target_id):
        """
        Delete the iscsi target and verify it no longer exists
        """
        self.iscsi_client.delete_iscsitarget(target_id)
        resp = self.iscsi_client.query()
        for i in range(len(resp)):
            if resp[i]['target_id'] == target_id:
                raise exceptions.TestFail("Delete target failed")

    def _create_and_bind_ISCSI_to_rbd(self, rbd_id):
        self.target_id = self._create_iscsi_target()
        self.lun_id = self._create_iscsi_lun(self.target_id, rbd_id)

    def _start_restore(self, rbd_id, timestamp):
        restore_body = {}
        restore_body['snap_time'] = timestamp
        resp_body = self.client.start_restore(rbd_id, **restore_body)
        LOG.info("Try to recover to remote backup %s!" % timestamp)
        time.sleep(30)
        body = resp_body.body
        return body.get('id')

    def _verify_task_successfully(self, rbtask_id, state):
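        # Look up the backup/restore task, wait for it to reach the expected
        # state, and return its timestamp (used later as a restore point).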
        extra_url = '/list_rbtasks?count=1024&begin_index=0'
        rbtasks = self.client.list_rbtasks(extra_url)
        rb_record = None
        for rbtask in rbtasks:
            if rbtask['id'] == rbtask_id:
                rb_record = rbtask['properties']['timestamp']
                break
        if rb_record:
            status = test_utils.wait_for_remote_backup_or_restore_complete(
                rbtask_id, self.client, state, 60)
            if status:
                LOG.info("%s successfully, the timestamp is %s"
                         % (state, rb_record))
                return rb_record

        raise exceptions.TestFail("Failed to %s!" % state)

    @staticmethod
    def _verify_file_exist(file_name, mount_point, actual, expected):
        if actual:
            LOG.info("Found %s under %s!" % (file_name, mount_point))
            if actual != expected:
                raise exceptions.TestFail("Expected not to find the file %s."
                                          % file_name)
        else:
            LOG.info("%s not found under %s" % (file_name, mount_point))
            if actual != expected:
                raise exceptions.TestFail("Expected to find the file %s."
                                          % file_name)

    def test_rbd_remote_backup(self):
        """
        This test basically performs the following steps:
            1. create rbd in testpool
            2. format disk
            3. create remote backup for this rbd (e.g. record1)
            4. write data to this rbd via ISCSI, including sub-steps 1) to 11)
            5. create remote backup for this rbd (e.g. record2)
            6. recover rbd from record1
            7. repeat step 4: sub-steps 2) 3) 4) 5) 7)
            8. check testfile.txt does not exist
            9. recover rbd from record2
            10. check testfile.txt exists
        """
        mount_point = self.params.get('mount_point', '/mnt')
        file_name = self.params.get('file_name', 'testfile.txt')
        # step1 create rbd in testpool
        self.rbd_id = test_utils.create_rbd(self.pool_id, self.params)
        LOG.info("Create rbd %s in pool %s" % (self.rbd_id, self.pool_id))
        # step2 format disk
        self._create_and_bind_ISCSI_to_rbd(self.rbd_id)
        time.sleep(60)
        need_mk = True
        create_data = False
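        # First pass: need_mk=True so the newly mapped disk gets formatted and
        # mounted, create_data=False so testfile.txt is not written and should
        # not be found afterwards.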
        find = test_utils.operate_iscsi(self.control_server_ip,
                                        self.control_username,
                                        self.control_password,
                                        self.initiator_ip,
                                        self.initiator_username,
                                        self.initiator_password,
                                        self.iscsi_target_name,
                                        self.initiator_ip, mount_point,
                                        file_name, need_mk, create_data)
        self._verify_file_exist(file_name, mount_point, find, False)
        self._delete_iscsi_lun(self.target_id, self.lun_id)
        time.sleep(60)
        # step3 create remote backup for this rbd
        rbtask_id_1 = self._start_rbtask(self.rbd_id)
        rb_record_1 = self._verify_task_successfully(rbtask_id_1, 'backed_up')
        # step4 write data to this rbd via ISCSI
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)
        time.sleep(60)
        need_mk = False
        create_data = True
        # step4: sub-step 2)-10)
        find = test_utils.operate_iscsi(self.control_server_ip,
                                        self.control_username,
                                        self.control_password,
                                        self.initiator_ip,
                                        self.initiator_username,
                                        self.initiator_password,
                                        self.iscsi_target_name,
                                        self.initiator_ip, mount_point,
                                        file_name, need_mk, create_data)
        self._verify_file_exist(file_name, mount_point, find, True)
        # step4: sub-step 11)
        self._delete_iscsi_lun(self.target_id, self.lun_id)
        time.sleep(60)
        # step 5 create remote backup for this rbd
        rbtask_id_2 = self._start_rbtask(self.rbd_id)
        rb_record_2 = self._verify_task_successfully(rbtask_id_2, 'backed_up')
        # step 6 recover rbd from rb_record_1
        restore_id = self._start_restore(self.rbd_id, rb_record_1)
        self._verify_task_successfully(restore_id, 'restored')
        # step 7
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)
        time.sleep(60)
        need_mk = False
        create_data = False
        find = test_utils.operate_iscsi(self.control_server_ip,
                                        self.control_username,
                                        self.control_password,
                                        self.initiator_ip,
                                        self.initiator_username,
                                        self.initiator_password,
                                        self.iscsi_target_name,
                                        self.initiator_ip, mount_point,
                                        file_name, need_mk, create_data)
        # step 8 check testfile.txt does not exist
        self._verify_file_exist(file_name, mount_point, find, False)
        self._delete_iscsi_lun(self.target_id, self.lun_id)
        time.sleep(60)
        # step 9 do recover rbd from record2
        restore_id = self._start_restore(self.rbd_id, rb_record_2)
        self._verify_task_successfully(restore_id, 'restored')
        # step 10 verify testfile.txt exists
        self.lun_id = self._create_iscsi_lun(self.target_id, self.rbd_id)
        time.sleep(60)
        need_mk = False
        create_data = False
        find = test_utils.operate_iscsi(self.control_server_ip,
                                        self.control_username,
                                        self.control_password,
                                        self.initiator_ip,
                                        self.initiator_username,
                                        self.initiator_password,
                                        self.iscsi_target_name,
                                        self.initiator_ip, mount_point,
                                        file_name, need_mk, create_data)
        self._verify_file_exist(file_name, mount_point, find, True)
        self._delete_iscsi_lun(self.target_id, self.lun_id)

    def teardown(self):
        if self.target_id:
            self._delete_target(self.target_id)
        if self.rbd_id:
            try:
                test_utils.delete_rbd(self.pool_id, self.rbd_id, self.params)
            except exceptions.UnexpectedResponseCode:
                pass
Exemplo n.º 18
0
class ClusterRecovery(test.Test):
    """
    Module for testing cluster recovery related operations.

    1. create pool
    2. create rbd
    3. run fio
    4. take one osd down, check cluster status and run fio a second time
    5. remove the host, including its osd and monitor,
       so the monitor must be a follower
    6. check the host is removed successfully
    7. add a host
    8. expand the cluster
    9. add a monitor on this host
    10. create pool, create rbd, run fio
    """
    def __init__(self, params, env):
        self.params = params
        self.env = env
        # storage server info for add server
        self.server = None
        self.cluster_client = ClustersClient(params)
        self.server_client = ServersClient(params)
        self.monitor_client = MonitorsClient(params)
        self.osd_client = OsdClient(params)
        self.dstpath = '/root'
        self.workload_path = data_dir.COMMON_TEST_DIR
        self.fio_version = self.params.get('fio_version')
        self.fio_working_path = \
            self.fio_version[0:len(self.fio_version) - len('.tar.gz')]

        self.server_name = None
        self.server_id = None

    def setup(self):
        ceph_server_ip = self.params.get('ceph_management_url')
        self.mid_host_ip = ceph_server_ip.split(':')[1].strip('/')
        self.cluster_id = self.params.get('cluster_id')
        self.mid_host_user = self.params.get('ceph_server_ssh_username')
        self.mid_host_password = self.params.get('ceph_server_ssh_password')
        self.end_host_user = self.params.get('ceph_node_ssh_username')
        self.end_host_password = self.params.get('ceph_node_ssh_password')

        self.end_host_ip = test_utils.get_available_host_ip(self.params)

    def test(self):
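        # Copy the fio tarball to the management (mid) host first, then relay
        # it on to the storage (end) node where fio will actually run.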
        LOG.info('Copy file %s from local to %s' %
                 (self.fio_version, self.mid_host_ip))
        remote.scp_to_remote(host=self.mid_host_ip,
                             port=22,
                             username=self.mid_host_user,
                             password=self.mid_host_password,
                             local_path=os.path.join(self.workload_path,
                                                     self.fio_version),
                             remote_path=self.dstpath)
        LOG.info('Copy file %s from %s to %s' %
                 (self.fio_version, self.mid_host_ip, self.end_host_ip))
        remote.scp_between_remotes(src=self.mid_host_ip,
                                   dst=self.end_host_ip,
                                   port=22,
                                   s_passwd=self.mid_host_password,
                                   d_passwd=self.end_host_password,
                                   s_name=self.mid_host_user,
                                   d_name=self.end_host_user,
                                   s_path=os.path.join(self.dstpath,
                                                       self.fio_version),
                                   d_path=self.dstpath)
        self.__create_pool()
        self.__create_rbd()
        self.__write_rbd(True)

        self.__get_available_server()
        self.__get_available_osd()
        self.__down_osd()

        # self.__check_cluster_status()
        status = self.__wait_for_ceph_in_status()
        if not status:
            raise exceptions.TestFail('Cluster status must be HEALTH_OK, '
                                      'or HEALTH_WARN for clock skew detected')
        self.__write_rbd()

        self.__del_osd()
        self.__del_monitor()
        self.__del_server()

        self.server_name = test_utils.add_server(
            self.server_client, self.params.get('rest_arg_servername'),
            self.params.get('rest_arg_username'),
            self.params.get('rest_arg_password'),
            self.params.get('rest_arg_publicip'),
            self.params.get('rest_arg_clusterip'),
            self.params.get('rest_arg_managerip'),
            self.params.get('rest_arg_parent_bucket'))
        test_utils.expand_cluster(self.cluster_client, self.server_client,
                                  self.cluster_id, self.server_name)

        self.__create_monitor()
        status = self.__wait_for_ceph_in_status()
        if not status:
            raise exceptions.TestFail('Cluster status must be HEALTH_OK, '
                                      'or HEALTH_WARN for clock skew detected')
        # self.__check_cluster_status()
        self.__is_monitor_added()
        self.__create_pool()
        self.__create_rbd()
        self.__write_rbd()

    def __get_available_server(self):
        self.server_id = test_utils.get_available_server(self.params)
        body = {}
        servers = self.server_client.query(**body)
        for server in servers:
            if server.get('id') == self.server_id:
                self.server = server

    def __get_available_osd(self):
        self.osd_id = test_utils.get_available_osd(self.server_id, self.params)

    def __down_osd(self):
        resp = self.osd_client.stop_osd(self.server_id, self.osd_id)
        if resp.get('status') != 'down':
            raise exceptions.TestFail("Stop osd '%s' failed" % self.osd_id)

    def __del_osd(self):
        test_utils.delete_osd(self.server_id, self.params)

    def __del_monitor(self):
        test_utils.delete_monitor(self.cluster_id, self.server_id, self.params)

    def __is_monitor_added(self):
        monitors = self.monitor_client.query(self.cluster_id)
        for monitor in monitors:
            if monitor.get('host') == self.server.get('servername'):
                return
        raise exceptions.TestFail('Failed to add monitor to %s' %
                                  self.server.get('servername'))

    def __del_server(self):
        self.server_client.delete_server(self.server_id)
        body = {}
        servers = self.server_client.query(**body)
        for server in servers:
            if server.get('id') == self.server_id:
                raise exceptions.TestFail('Failed to delete server %s' %
                                          self.server_id)
        LOG.info('Deleted server successfully!')

    def __create_monitor(self):
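        # Monitor creation can take a while, so it is started in a background
        # thread and given ~50s to register before the test continues.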
        LOG.info('Create monitor ...')
        t1 = threading.Thread(target=self.monitor_client.create,
                              args=[self.cluster_id, self.server_id])
        t1.start()
        time.sleep(50)

    def __check_cluster_status(self):
        time.sleep(400)
        stdout_msg = remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd='ceph -s',
            timeout=1000)
        pat = 'health (.*)'
        result = re.findall(pat, stdout_msg)
        if len(result) > 0:
            if result[0] not in ('HEALTH_OK', 'HEALTH_WARN'):
                raise exceptions.TestFail('Cluster status must be HEALTH_OK, '
                                          'or HEALTH_WARN not %s' % result[0])
            if 'HEALTH_WARN' in result[0]:
                pat = 'Monitor clock skew detected'
                war_msg = re.findall(pat, stdout_msg)
                if not len(war_msg):
                    raise exceptions.TestFail(
                        'Cluster status must be HEALTH_OK, '
                        'or HEALTH_WARN for clock skew detected')
        else:
            raise exceptions.TestFail('Msg data error, please check !')
        LOG.info('Cluster recovered successfully!')

    def __wait_for_ceph_in_status(self):
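        # Poll 'ceph -s' until health is HEALTH_OK, or HEALTH_WARN caused only
        # by monitor clock skew, retrying every 50s for up to 1000s.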
        def is_in_status():
            stdout_msg = remote.run_cmd_between_remotes(
                mid_host_ip=self.mid_host_ip,
                mid_host_user=self.mid_host_user,
                mid_host_password=self.mid_host_password,
                end_host_ip=self.end_host_ip,
                end_host_user=self.end_host_user,
                end_host_password=self.end_host_password,
                cmd='ceph -s',
                timeout=1000)
            pat = 'health (.*)'
            result = re.findall(pat, stdout_msg)
            if len(result) > 0:
                if result[0] not in ('HEALTH_OK', 'HEALTH_WARN'):
                    return False
                if 'HEALTH_WARN' in result[0]:
                    pat = 'Monitor clock skew detected'
                    war_msg = re.findall(pat, stdout_msg)
                    if not len(war_msg):
                        return False
            else:
                raise exceptions.TestFail('Msg data error, please check !')
            return True

        return utils_misc.wait_for(is_in_status,
                                   timeout=1000,
                                   first=0,
                                   step=50,
                                   text='Waiting for ceph in status')

    def __create_pool(self):
        resp = test_utils.create_pool(self.params, flag=True)
        self.pool_id = resp.get('id')
        self.pool_name = resp.get('name')

    def __create_rbd(self):
        resp = test_utils.create_rbd_with_capacity(self.pool_id, self.params,
                                                   RBD_CAPACITY, True)
        self.rbd_id = resp.get('id')
        self.rbd_name = resp.get('name')

    def __write_rbd(self, flag=False):
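        # Build an fio command using the rbd ioengine to write 2M into the
        # test rbd; when flag is True the fio tarball is extracted first.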
        cmd1 = 'cd %s;' % self.fio_working_path
        cmd2 = './fio -ioengine=rbd -clientname=admin '
        cmd3 = '-pool=%s -rw=write -bs=1M -iodepth=128 -numjobs=1 -direct=1 ' % \
               self.pool_name
        cmd4 = '-size=2M -group_reporting -rbdname=%s -name=mytest' % \
               self.rbd_name
        cmd = cmd1 + cmd2 + cmd3 + cmd4
        if flag:
            cmd = 'tar -xzvf %s;' % self.fio_version + cmd
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            timeout=1000)

    def teardown(self):
        # delete files
        cmd_mid = 'rm -rf %s' % (os.path.join(self.dstpath, self.fio_version))
        cmd1 = 'pkill fio || true; '
        cmd2 = 'rm -rf %s %s' % (os.path.join(self.dstpath, self.fio_version),
                                 os.path.join(self.dstpath,
                                              self.fio_working_path))
        cmd = cmd1 + cmd2
        remote.run_cmd_between_remotes(
            mid_host_ip=self.mid_host_ip,
            mid_host_user=self.mid_host_user,
            mid_host_password=self.mid_host_password,
            end_host_ip=self.end_host_ip,
            end_host_user=self.end_host_user,
            end_host_password=self.end_host_password,
            cmd=cmd,
            cmd_mid=cmd_mid)

        LOG.info("added server name is %s" % self.server_name)
        if self.server_name is not None:
            self.server_id = test_utils.get_server_id_by_name(
                self.params, self.server_name)
            LOG.info("server id is %s" % self.server_id)
        if self.server_id is not None:
            LOG.info('Begin to sleep 60s ...')
            time.sleep(60)
            test_utils.delete_osd(self.server_id, self.params)
            test_utils.del_server(self.server_client, self.server_id)
Exemplo n.º 19
0
def get_available_disk(server_id, params):
    server_client = ServersClient(params)
    return server_client.get_server_disks(server_id)
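

# Hypothetical usage sketch, not part of the original listing: assumes
# `params` is the same REST config dict used by the tests above (with
# 'cluster_id' already set) and `server_id` is the id of an existing
# storage server.
def _example_list_server_disks(server_id, params):
    disks = get_available_disk(server_id, params)
    for disk in disks:
        print('Available disk on server %s: %s' % (server_id, disk))
    return disks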