def setup(self):
    """Resolve cluster and pool context, then build the REST clients.

    Cluster id resolution order: test env, explicit param, first
    available cluster. The pool is looked up by name; when no pool with
    that name exists (or no name is configured) a new pool is created.
    """
    if 'cluster' in self.env:
        self.cluster_id = self.env['cluster']
    elif self.params.get('cluster_id'):
        self.cluster_id = self.params.get('cluster_id')
    else:
        clusters = test_utils.get_available_clusters(self.params)
        if len(clusters) > 0:
            self.cluster_id = clusters[0]['id']
            self.params['cluster_id'] = self.cluster_id
    self.pool_client = PoolsClient(self.params)
    if 'pool_name' in self.env:
        self.pool_name = self.env['pool_name']
    else:
        self.pool_name = self.params.get('pool_name', 'rbd')
    self.params['pool_name'] = self.pool_name
    if self.pool_name is not None:
        resp = self.pool_client.query()
        for pool in resp:
            if pool['name'] == self.pool_name:
                self.pool_id = pool['id']
                break  # first match wins; original kept scanning
        else:
            # BUGFIX: previously self.pool_id stayed unset when no pool
            # matched the configured name, crashing later users of it;
            # fall back to creating a pool, as the no-name path does.
            self.pool_id = test_utils.create_pool(self.params)
            LOG.info("Created pool that id is %s" % self.pool_id)
    else:
        self.pool_id = test_utils.create_pool(self.params)
        LOG.info("Created pool that id is %s" % self.pool_id)
    self.params['pool_id'] = self.pool_id
    self.rbd_client = RbdClient(self.params)
    self.iscsi_client = ISCSIClient(self.params)
def test_set_ec_pool_cache(self):
    """
    Set up cache for EC pool

    Skips when the environment declares no EC pool; otherwise creates a
    cache pool and attaches it to the EC pool in writeback mode.
    """
    # BUGFIX: check the skip condition FIRST -- the original created
    # (and leaked) a cache pool even when the test was about to skip.
    if self.params.get('NO_EC', "true") == "true":
        raise exceptions.TestSkipError("There is not EC pool")
    pool_id = self.env.get('pool_tmp_id')
    vgroup_id = self.env.get('vgroup_id', 1)
    cache_pool = test_utils.create_pool(self.params, flag=True,
                                        vgroup_id=vgroup_id)
    self.env['cache_pool_id'] = cache_pool.get('pool_id')
    self.env['cache_pool_name'] = cache_pool.get('name')
    set_cache = {
        'cache_pool_id': cache_pool.get('pool_id'),
        'cache_pool_name': cache_pool.get('name'),
        'cache_size': 107374182400,  # 100 GiB
        # NOTE(review): 'radio' looks like a typo for 'ratio', but these
        # are server-side API keys and must be sent as-is.
        'target_dirty_radio': 30,
        'target_full_radio': 70,
        'option': 'set_cache',
        'caching_mode': 'writeback',
    }
    resp = self.client.set_cache(pool_id, **set_cache)
    LOG.info('Rest Response: %s' % resp)
    if not resp and utils.verify_response(self.body, resp):
        raise exceptions.TestFail("Set up EC pool cache failed: %s"
                                  % self.body)
def _create_rbd(self):
    """Create a pool plus a 1 MiB rbd in it; record ids and names."""
    capacity = 1024 * 1024  # 1 MiB
    self.pool_id = test_utils.create_pool(self.params)
    response = test_utils.create_rbd_with_capacity(
        self.pool_id, self.params, capacity)
    self.rbd_response = response
    self.rbd_id = response.get('id')
    self.rbd_name = response.get('name')
def test_edit_pool(self):
    """Edit a pool's replicate/pg_num/group and verify rbd writes still work.

    Creates a pool with a 1 MiB rbd, stages fio on the end host, writes
    a baseline, then updates pg_num and group and re-checks writes.
    """
    group_id = 1
    # Creating 1M rbd
    RBD_CAPACITY = 1024 * 1024
    self.pool_response = test_utils.create_pool(self.params, flag=True)
    self.pool_name = self.pool_response.get('name')
    self.pool_id = self.pool_response.get('id')
    self.rbd_response = test_utils.create_rbd_with_capacity(
        self.pool_id, self.params, RBD_CAPACITY)
    self.rbd_id = self.rbd_response.get('id')
    self.rbd_name = self.rbd_response.get('name')
    # Strip '.tar.gz' to get the extracted fio directory name.
    self.fio_working_path = \
        self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
    LOG.info('Copy file %s from local to %s' % (self.fio_version,
                                                self.mid_host_ip))
    remote.scp_to_remote(host=self.mid_host_ip,
                         port=22,
                         username=self.mid_host_user,
                         password=self.mid_host_password,
                         local_path=os.path.join(self.workload_path,
                                                 self.fio_version),
                         remote_path=self.dstpath)
    remote.scp_between_remotes(src=self.mid_host_ip,
                               dst=self.end_host_ip,
                               port=22,
                               s_passwd=self.mid_host_password,
                               # BUGFIX: was self.end_host_passwprd (typo);
                               # sibling tests use end_host_password.
                               d_passwd=self.end_host_password,
                               s_name=self.mid_host_user,
                               d_name=self.end_host_user,
                               s_path=os.path.join(self.dstpath,
                                                   self.fio_version),
                               d_path=self.dstpath)
    self._write_rbd(self.pool_name, self.rbd_name, flag=True)
    self._check_rbd_write(self.pool_id, self.rbd_name, 0, 0)
    # Update the size and pg_num to the pool
    replicate = 2
    pg_num = 80
    self._update_pool(self.pool_id, replicate, group_id, pg_num)
    self.pool_name = \
        self._query_pool(self.pool_id, group_id, replicate, pg_num)
    self._write_rbd(self.pool_name, self.rbd_name, flag=True)
    self._check_rbd_write(self.pool_id, self.rbd_name, 0, 1024 * 1024)
    # Update the group to the pool
    # NOTE(review): group_id is re-assigned to the same value (1), so
    # this "group update" exercises an update with an unchanged group --
    # confirm whether a different group id was intended.
    group_id = 1
    self._update_pool(self.pool_id, replicate, group_id, pg_num)
    self.pool_name = \
        self._query_pool(self.pool_id, group_id, replicate, pg_num)
    self._write_rbd(self.pool_name, self.rbd_name, flag=True)
    self._check_rbd_write(self.pool_id, self.rbd_name, 0, 1024 * 1024)
def _migrate_rbd(self, rbd_id):
    """
    Test that migration of specified rdb

    Creates a fresh target pool, migrates the given rbd into it, and
    returns the target pool id. Raises TestFail on an empty response.
    """
    target_pool = test_utils.create_pool(self.params)
    payload = {'target_pool': str(target_pool)}
    resp = self.rbd_client.migrate(self.pool_id, rbd_id, **payload)
    LOG.info('Rest Response: %s' % resp)
    if not resp:
        raise exceptions.TestFail("Migarate rbd failed")
    return target_pool
def test(self):
    """Exercise snapshot create/update/query/clone on a fresh rbd."""
    self.pool_id = test_utils.create_pool(self.params)
    response = test_utils.create_rbd_with_capacity(
        self.pool_id, self.params, RBD_CAPACITY)
    self.rbd_response = response
    self.rbd_id = response.get('id')
    self.rbd_name = response.get('name')
    # First snapshot, rbd rename, second snapshot, then query and clone.
    self.__create_snapshot()
    self.__update_rbd(self.rbd_name)
    self.snapshot_id = self.__create_snapshot()
    self.__query_snapshot()
    self.__clone_snapshot()
def _migrate_rbd(self):
    """Migrate this rbd into a newly created pool and swap pool ids."""
    LOG.info("Start migrate rbd to new pool!")
    self.target_pool = test_utils.create_pool(self.params)
    payload = {'target_pool': str(self.target_pool)}
    resp = self.rbd_client.migrate(self.pool_id, self.rbd_id, **payload)
    # Give the backend time to finish the move before checking status.
    time.sleep(60)
    if resp.response['status'] != '200':
        raise exceptions.TestFail("Migrate rbd failed: %s" % self.body)
    self.env['pool_tmp_id'] = self.target_pool
    # After a successful move the target pool becomes the current pool.
    self.pool_id, self.target_pool = self.target_pool, self.pool_id
    LOG.info("Migrate rbd to new pool success!")
def test_migrate(self):
    """
    Test that migration of specified rdb
    """
    rbd_id = self.env.get('rbd_tmp_id')
    vgroup_id = self.env.get('vgroup_id')
    target_pool = test_utils.create_pool(self.params,
                                         vgroup_id=vgroup_id)
    # Allow the freshly created pool to become ready before migrating.
    time.sleep(60)
    payload = {'target_pool': str(target_pool)}
    resp = self.client.migrate(self.pool_id, rbd_id, **payload)
    LOG.info('Rest Response: %s' % resp)
    if not resp and utils.verify_response(self.body, resp):
        raise exceptions.TestFail("Migarate rbd failed: %s" % self.body)
    self.env['pool_target_id'] = target_pool
def setup(self):
    """Resolve cluster id, pool id and initiator ip before the test."""
    if 'cluster' in self.env:
        self.cluster_id = self.env['cluster']
    elif self.params.get('cluster_id'):
        self.cluster_id = self.params.get('cluster_id')
    if self.params.get('pool_id'):
        self.pool_id = self.params.get('pool_id')
    else:
        self.pool_id = test_utils.create_pool(self.params)
    LOG.info("pool_id is %s" % self.pool_id)
    # BUGFIX: the original only looked up an available host ip WHEN the
    # initiator_ip param was set -- and ignored the param's value.
    # Mirror the sibling setup(): use the configured value when present,
    # otherwise discover an available host ip.
    if self.params.get('initiator_ip'):
        self.initiator_ip = self.params.get('initiator_ip')
    else:
        self.initiator_ip = test_utils.get_available_host_ip(self.params)
def test(self):
    """Run fio QoS scenarios (iops and bw caps) against a fresh rbd."""
    # Stage the fio tarball: local -> mid host -> end host.
    LOG.info('Copy file %s from local to %s' % (self.fio_version,
                                                self.mid_host_ip))
    remote.scp_to_remote(host=self.mid_host_ip,
                         port=22,
                         username=self.mid_host_user,
                         password=self.mid_host_password,
                         local_path=os.path.join(self.workload_path,
                                                 self.fio_version),
                         remote_path=self.dstpath)
    LOG.info('Copy file %s from %s to %s' % (self.fio_version,
                                             self.mid_host_ip,
                                             self.end_host_ip))
    remote.scp_between_remotes(src=self.mid_host_ip,
                               dst=self.end_host_ip,
                               port=22,
                               s_passwd=self.mid_host_password,
                               d_passwd=self.end_host_password,
                               s_name=self.mid_host_user,
                               d_name=self.end_host_user,
                               s_path=os.path.join(self.dstpath,
                                                   self.fio_version),
                               d_path=self.dstpath)
    # Create the pool and a test rbd to exercise QoS on.
    pool_resp = test_utils.create_pool(self.params, flag=True)
    self.pool_response = pool_resp
    self.pool_name = pool_resp.get('name')
    self.pool_id = pool_resp.get('id')
    rbd_resp = test_utils.create_rbd_with_capacity(
        self.pool_id, self.params, RBD_CAPACITY)
    self.rbd_response = rbd_resp
    self.rbd_id = rbd_resp.get('id')
    self.rbd_name = rbd_resp.get('name')
    self.rbd_client = RbdClient(self.params)
    self.params['rbds_id'] = self.rbd_id
    self.params['pool_id'] = self.pool_id
    self.qos_client = QosClient(self.params)
    # iops-limited runs, then bandwidth-limited runs, across rw modes.
    self.__test_operation(property_type='iops', rw='randwrite', flag=True)
    self.__test_operation(property_type='iops', rw='randread')
    self.__test_operation(property_type='iops', rw='randrw',
                          rw_type='rwmixread', rw_value=70)
    self.__test_operation(property_type='bw', rw='randwrite')
    self.__test_operation(property_type='bw', rw='randread')
    self.__test_operation(property_type='bw', rw='randrw',
                          rw_type='rwmixread', rw_value=70)
def test_resize_migrage_delaydel(self):
    """Resize an rbd, migrate it to a new pool, then delay-delete it."""
    one_mib = 1024 * 1024
    # Create rbd in the pool
    self.pool_id = test_utils.create_pool(self.params)
    self.rbd_id = test_utils.create_rbd_with_capacity(
        self.pool_id, self.params, one_mib, False)
    self._check_specified_rbd_size(self.rbd_id, one_mib)
    # Rename and grow the rbd to 2 MiB.
    new_name = 'cloudtest_new' + utils_misc.generate_random_string(6)
    self._update_rdb_capacity(self.rbd_id, new_name, one_mib * 2)
    self.target_pool = self._migrate_rbd(self.rbd_id)
    # Wait for the migration to settle before verifying placement.
    time.sleep(120)
    self._check_rbd_pool(self.rbd_id, self.target_pool)
    self._delay_delete_rbd(self.target_pool, self.rbd_id)
    self._check_delay_delete_rbd_list()
def test_create(self):
    """Create pools and rbds, then snapshot them all as one group."""
    snap_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    group_bodies = []
    for p in range(self.pool_count):
        pool_id = test_utils.create_pool(self.params)
        self.env['pool_id_%s' % p] = pool_id
        for r in range(self.rbd_count_of_per_pool):
            rbd_id = test_utils.create_rbd(pool_id=pool_id,
                                           params=self.params)
            self.env['rbd_id_%s_%s' % (p, r)] = rbd_id
            # Each entry names one (pool, rbd) pair to snapshot.
            group_bodies.append({
                "pool_id": pool_id,
                "rbd_id": rbd_id,
                "snap_name": snap_name,
            })
    resp = self.client.create(group_bodies)
    body = resp.body
    self.env['snapshot_group_id'] = body.get('id')
    self.env['snapshot_group_name'] = body.get('name')
    LOG.info("Create snapshot_group successfully: %s" % body)
def setup(self):
    """Set up before execute test"""
    if 'cluster' in self.env:
        self.cluster_id = self.env['cluster']
    elif self.params.get('cluster_id'):
        self.cluster_id = self.params.get('cluster_id')
    # Reuse a configured pool when given, otherwise create one.
    if self.params.get("pool_id"):
        self.pool_id = self.params.get('pool_id')
    else:
        self.pool_id = test_utils.create_pool(self.params)
    LOG.info("pool_id id %s " % self.pool_id)
    # Initiator ip: explicit param wins, else discover an available host.
    if self.params.get('initiator_ip'):
        self.initiator_ip = self.params.get('initiator_ip')
    else:
        self.initiator_ip = test_utils.get_available_host_ip(self.params)
    # Copy every rest_arg_* param into the request body (prefix removed).
    for key, value in self.params.items():
        if 'rest_arg_' not in key:
            continue
        self.body[key.split('rest_arg_')[1]] = value
def test_image_write_read(self):
    """Verify rbd writes survive cluster expansion.

    Flow: stage fio on the end host, create a pool + 10 MiB rbd and
    write to it, add a server and expand the cluster, create a second
    pool + rbd, then re-verify writes on both the pre- and
    post-expansion rbds.
    """
    RBD_CAPACITY = 10485760  # 10 MiB
    # Extracted fio directory name (tarball name minus '.tar.gz').
    self.fio_working_path = \
        self.fio_version[0:len(self.fio_version) - len('.tar.gz')]
    # Stage the fio tarball: local -> mid host -> end host.
    LOG.info('Copy file %s from local to %s' % (self.fio_version,
                                                self.mid_host_ip))
    remote.scp_to_remote(host=self.mid_host_ip,
                         port=22,
                         username=self.mid_host_user,
                         password=self.mid_host_password,
                         local_path=os.path.join(self.workload_path,
                                                 self.fio_version),
                         remote_path=self.dstpath)
    LOG.info('Copy file %s from %s to %s' % (self.fio_version,
                                             self.mid_host_ip,
                                             self.end_host_ip))
    remote.scp_between_remotes(src=self.mid_host_ip,
                               dst=self.end_host_ip,
                               port=22,
                               s_passwd=self.mid_host_password,
                               d_passwd=self.end_host_password,
                               s_name=self.mid_host_user,
                               d_name=self.end_host_user,
                               s_path=os.path.join(self.dstpath,
                                                   self.fio_version),
                               d_path=self.dstpath)
    # Baseline: pool + rbd created BEFORE the cluster is expanded.
    self.pool_response_before = test_utils.create_pool(self.params,
                                                       flag=True)
    self.pool_name_before = self.pool_response_before.get('name')
    self.pool_id_before = self.pool_response_before.get('id')
    self.rbd_response_before = test_utils.create_rbd_with_capacity(
        self.pool_id_before, self.params, RBD_CAPACITY)
    self.rbd_id_before = self.rbd_response_before.get('id')
    self.rbd_name_before = self.rbd_response_before.get('name')
    self.__write_rbd(self.pool_name_before, self.rbd_name_before,
                     flag=True)
    self.__check_rbd_write(self.pool_id_before, self.rbd_name_before)
    # Add a new server and expand the cluster onto it.
    self.server_name = test_utils.add_server(
        self.server_client,
        self.params.get('rest_arg_servername'),
        self.params.get('rest_arg_username'),
        self.params.get('rest_arg_password'),
        self.params.get('rest_arg_publicip'),
        self.params.get('rest_arg_clusterip'),
        self.params.get('rest_arg_managerip'),
        self.params.get('rest_arg_parent_bucket'))
    LOG.info("added server name is %s" % self.server_name)
    test_utils.expand_cluster(self.cluster_client, self.server_client,
                              self.cluster_id, self.server_name)
    # Post-expansion: a second pool + rbd on the grown cluster.
    self.pool_response_after = test_utils.create_pool(self.params,
                                                      flag=True)
    self.pool_name_after = self.pool_response_after.get('name')
    self.pool_id_after = self.pool_response_after.get('id')
    self.rbd_response_after = test_utils.create_rbd_with_capacity(
        self.pool_id_after, self.params, RBD_CAPACITY)
    self.rbd_id_after = self.rbd_response_after.get('id')
    self.rbd_name_after = self.rbd_response_after.get('name')
    # Re-verify the old rbd still accepts writes, then check the new one.
    self.__write_rbd(self.pool_name_before, self.rbd_name_before)
    self.__check_rbd_write(self.pool_id_before, self.rbd_name_before)
    self.__write_rbd(self.pool_name_after, self.rbd_name_after)
    self.__check_rbd_write(self.pool_id_after, self.rbd_name_after)
def setup(self):
    """
    Set up before executing test
    1. to check if two clusters are available
    2. create one pool: testpool
    3. configure remote backup in the testpool
    """
    # to check if two cluster are available
    clusters = test_utils.get_available_clusters(self.params)
    if len(clusters) < 1:
        raise exceptions.TestSetupFail(
            'There are not enough clusters!')
    elif len(clusters) < 2:
        # Only one cluster: deploy a second one to back up into.
        LOG.info('There are not enough clusters, try to create cluster!')
        self.cluster_id = self._create_cluster()
        self.params['cluster_id'] = self.cluster_id
        self.servers_client = ServersClient(self.params)
        # Server definitions for the new cluster come from the
        # rest_arg_cluster2_* params (prefix stripped).
        for k, v in self.params.items():
            if 'rest_arg_cluster2_' in k:
                new_key = k.split('rest_arg_cluster2_')[1]
                self.create_servers_body[new_key] = v
        self._add_three_hosts()
        self._deploy_cluster()
        # Re-query; deployment must have yielded a second cluster.
        clusters = test_utils.get_available_clusters(self.params)
        if len(clusters) < 2:
            raise exceptions.TestSetupFail(
                'There are not enough clusters!')
    # Work against the second cluster; the other becomes the
    # destination of the remote backup.
    self.cluster_id = clusters[1]['id']
    self.params['cluster_id'] = self.cluster_id
    for cluster in clusters:
        if cluster['id'] != self.cluster_id:
            self.des_cluster_id = cluster['id']
            self.body['des_cluster_id'] = self.des_cluster_id
            break
    # Pick one server from each cluster as backup source/destination.
    src_host = test_utils.get_available_server_info(self.params,
                                                    self.cluster_id)
    self.src_ip = src_host['publicip']
    self.body['src_ip'] = self.src_ip
    self.src_host_id = src_host['id']
    self.body['src_host_id'] = self.src_host_id
    des_host = test_utils.get_available_server_info(self.params,
                                                    self.des_cluster_id)
    self.des_ip = des_host['publicip']
    self.body['des_ip'] = self.des_ip
    self.des_host_id = des_host['id']
    self.body['des_host_id'] = self.des_host_id
    # Reuse a configured pool or create a fresh test pool.
    if self.params.get('pool_id'):
        self.pool_id = self.params.get('pool_id')
    else:
        self.pool_id = test_utils.create_pool(self.params)
        pool_client = PoolsClient(self.params)
        if not test_utils.wait_for_pool_in_state(self.pool_id,
                                                 pool_client, 'ready'):
            raise exceptions.TestSetupFail("Failed to creating test pool!")
    self.params['pool_id'] = self.pool_id
    # configure remote backup in testpool
    LOG.info("Try to configure remote backup in pool %s : %s"
             % (self.pool_id, self.body))
    self.client = RemoteBackupClient(self.params)
    self.client.configure_rbpolicy(**self.body)
    # other pre-conditions
    # Extract the management host from a URL like http://host:port/.
    self.control_server_ip = self.params.get('ceph_management_url')
    self.control_server_ip = self.control_server_ip.split(':')[1].strip(
        '/')
    self.control_username = self.params.get('ceph_server_ssh_username',
                                            'root')
    self.control_password = self.params.get('ceph_server_ssh_password',
                                            'lenovo')
    # Default the initiator to the backup source host.
    self.initiator_ip = self.params.get('initiator_ip', self.src_ip)
    self.initiator_username = self.params.get('ceph_node_ssh_username',
                                              'root')
    self.initiator_password = self.params.get('ceph_node_ssh_password',
                                              'lenovo')
    # create iscsi client
    self.iscsi_client = ISCSIClient(self.params)
def __create_pool(self):
    """Create a pool (detailed response) and cache its id and name."""
    created = test_utils.create_pool(self.params, flag=True)
    self.pool_id = created.get('id')
    self.pool_name = created.get('name')
def setup(self):
    """
    Set up before executing test
    1. Cluster is deployed
    2. 5 100G rbd/image are created, and sequence writen
    3. Zabbix server and account is configured
    4. More hosts are available for adding
    """
    # cluster is deployed
    if 'cluster' in self.env:
        self.cluster_id = self.env['cluster']
    elif self.params.get('cluster_id'):
        self.cluster_id = self.params.get('cluster_id')
    else:
        raise exceptions.TestSetupFail(
            "Please set cluster_id in config first")
    self.pool_response = test_utils.create_pool(self.params, flag=True)
    self.pool_name = self.pool_response.get('name')
    self.pool_id = self.pool_response.get('id')
    # 5 100G rbd/image are created, and sequence writen
    self._copy_fio_package_to_host()
    self.params['capacity'] = 1024 * 1024 * 1024 * 100  # 100 GiB each
    rw = self.params.get('fio_rw', 'write')
    bs = self.params.get('fio_bs', '1M')
    iodepth = self.params.get('fio_iodepth', 128)
    size = self.params.get('fio_write_size', '100G')
    flag = True  # only the first write run passes flag=True
    for _ in range(5):
        rbd_name = 'cloudtest_' + utils_misc.generate_random_string(6)
        self.params['rbd_name'] = rbd_name
        rbd_id = test_utils.create_rbd(self.pool_id, self.params)
        LOG.info("Create rbd %s in pool %s" % (rbd_name, self.pool_id))
        self.rbds_id.append(rbd_id)
        self._write_rbd(rbd_name=rbd_name, rw=rw, bs=bs,
                        iodepth=iodepth, size=size, flag=flag)
        flag = False
    zabbix_ip = remote.get_zabbix_server_ip(self.mid_host_ip,
                                            self.mid_host_user,
                                            self.mid_host_password)
    self.params['rest_arg_zabbix_server_ip'] = zabbix_ip
    self.params['rest_arg_ntp_server_ip'] = zabbix_ip
    # Zabbix server and account is configured
    for k, v in self.params.items():
        if 'rest_arg_' in k:
            new_key = k.split('rest_arg_')[1]
            self.body[new_key] = v
    self._set_cluster_conf()
    zabbix_group_id = self.zabbix_client.get_host_group("cloudCeph")
    LOG.info("The cloudCeph group id in zabbix is %s" % zabbix_group_id)
    if zabbix_group_id:
        host_id = self.zabbix_client.get_host_id_by_group_id(
            zabbix_group_id)
        LOG.info("Monitored host id in zabbix is %s" % host_id)
        if host_id:
            self.recover_item_id = self.zabbix_client.get_item_id(
                host_id, "ceph.cluster.recovering_bytes")
            LOG.info(
                "The ceph.cluster.recovering_bytes item id in zabbix is %s"
                % self.recover_item_id)
        else:
            raise exceptions.TestSetupFail(
                "Zabbix server error, cannot get host id!")
    if self.recover_item_id is None:
        # BUGFIX: the original built this exception without raising it,
        # so a missing zabbix item id was silently ignored.
        raise exceptions.TestSetupFail(
            "Cannot get the value of ceph.cluster.recovering_bytes!")