def _validate_image(self, img, **kwargs):
    """
    Example of an RBD image json:

    {
        "size": 1073741824,
        "obj_size": 4194304,
        "num_objs": 256,
        "order": 22,
        "block_name_prefix": "rbd_data.10ae2ae8944a",
        "name": "img1",
        "pool_name": "rbd",
        "features": 61,
        "features_name": ["deep-flatten", "exclusive-lock", "fast-diff",
                          "layering", "object-map"]
    }
    """
    schema = JObj(sub_elems={
        'size': JLeaf(int),
        'obj_size': JLeaf(int),
        'num_objs': JLeaf(int),
        'order': JLeaf(int),
        'block_name_prefix': JLeaf(str),
        'name': JLeaf(str),
        'id': JLeaf(str),
        'pool_name': JLeaf(str),
        'namespace': JLeaf(str, none=True),
        'features': JLeaf(int),
        'features_name': JList(JLeaf(str)),
        'stripe_count': JLeaf(int, none=True),
        'stripe_unit': JLeaf(int, none=True),
        'parent': JObj(sub_elems={
            'pool_name': JLeaf(str),
            'pool_namespace': JLeaf(str, none=True),
            'image_name': JLeaf(str),
            'snap_name': JLeaf(str)
        }, none=True),
        'data_pool': JLeaf(str, none=True),
        'snapshots': JList(JLeaf(dict)),
        'timestamp': JLeaf(str, none=True),
        'disk_usage': JLeaf(int, none=True),
        'total_disk_usage': JLeaf(int, none=True),
        'configuration': JList(JObj(sub_elems={
            'name': JLeaf(str),
            'source': JLeaf(int),
            'value': JLeaf(str),
        })),
    })
    self.assertSchema(img, schema)

    for k, v in kwargs.items():
        if isinstance(v, list):
            self.assertSetEqual(set(img[k]), set(v))
        else:
            self.assertEqual(img[k], v)
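# A hypothetical call, for illustration: the helper validates the schema
# first, then compares each keyword argument against the image dict, with
# list values treated as order-insensitive sets. Endpoint and values below
# are assumptions, not part of the helper itself.
#
#   img = self._get('/api/block/image/{}'.format('rbd%2Fimg1'))
#   self._validate_image(img, name='img1', size=1073741824,
#                        features_name=['layering', 'exclusive-lock'])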
def test_check_token(self):
    self.login("admin", "admin")
    self._post("/api/auth/check", {"token": self.jsonBody()["token"]})
    self.assertStatus(200)
    data = self.jsonBody()
    self.assertSchema(data, JObj(sub_elems={
        "username": JLeaf(str),
        "permissions": JObj(sub_elems={}, allow_unknown=True),
        "sso": JLeaf(bool),
        "pwdUpdateRequired": JLeaf(bool)
    }, allow_unknown=False))
    self.logout()
def test_login_valid(self):
    self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
    self.assertStatus(201)
    data = self.jsonBody()
    self.assertSchema(data, JObj(sub_elems={
        'token': JLeaf(str),
        'username': JLeaf(str),
        'permissions': JObj(sub_elems={}, allow_unknown=True),
        'sso': JLeaf(bool),
        'pwdExpirationDate': JLeaf(int, none=True),
        'pwdUpdateRequired': JLeaf(bool)
    }, allow_unknown=False))
    self._validate_jwt_token(data['token'], "admin", data['permissions'])
def test_snapshots(self):
    fs_id = self.get_fs_id()
    self.mk_dirs('/movies/dune/extended_version')

    self._post("/api/cephfs/{}/mk_snapshot".format(fs_id),
               params={'path': '/movies/dune', 'name': 'test'})
    self.assertStatus(200)

    data = self.ls_dir('/movies', 1)
    self.assertSchema(data[0], JObj(sub_elems={
        'name': JLeaf(str),
        'path': JLeaf(str),
        'parent': JLeaf(str),
        'snapshots': JList(JObj(sub_elems={
            'name': JLeaf(str),
            'path': JLeaf(str),
            'created': JLeaf(str)
        })),
        'quotas': JObj(sub_elems={
            'max_bytes': JLeaf(int),
            'max_files': JLeaf(int)
        })
    }))
    snapshots = data[0]['snapshots']
    self.assertEqual(len(snapshots), 1)
    snapshot = snapshots[0]
    self.assertEqual(snapshot['name'], "test")
    self.assertEqual(snapshot['path'], "/movies/dune/.snap/test")

    # Should have filtered out "_test_$timestamp"
    data = self.ls_dir('/movies/dune', 1)
    snapshots = data[0]['snapshots']
    self.assertEqual(len(snapshots), 0)

    self._post("/api/cephfs/{}/rm_snapshot".format(fs_id),
               params={'path': '/movies/dune', 'name': 'test'})
    self.assertStatus(200)

    data = self.ls_dir('/movies', 1)
    self.assertEqual(len(data[0]['snapshots']), 0)

    # Cleanup. Note, the CephFS Python extension (and therefore the
    # Dashboard REST API) does not support recursive deletion of a
    # directory.
    self.rm_dir('/movies/dune/extended_version')
    self.rm_dir('/movies/dune')
    self.rm_dir('/movies')
def test_check_wo_token(self):
    self.login("admin", "admin")
    self._post("/api/auth/check", {"token": ""})
    self.assertStatus(200)
    data = self.jsonBody()
    self.assertSchema(data, JObj(sub_elems={
        "login_url": JLeaf(str)
    }, allow_unknown=False))
    self.logout()
def test_osd_devices(self):
    data = self._get('/api/osd/0/devices')
    self.assertStatus(200)
    self.assertSchema(data, JList(JObj({
        'daemons': JList(str),
        'devid': str,
        'location': JList(JObj({
            'host': str,
            'dev': str,
            'path': str
        }))
    })))
def _assert_user_data(self, data):
    self.assertSchema(data, JObj(sub_elems={
        'caps': JList(JObj(sub_elems={}, allow_unknown=True)),
        'display_name': JLeaf(str),
        'email': JLeaf(str),
        'keys': JList(JObj(sub_elems={}, allow_unknown=True)),
        'max_buckets': JLeaf(int),
        'subusers': JList(JLeaf(str)),
        'suspended': JLeaf(int),
        'swift_keys': JList(JObj(sub_elems={}, allow_unknown=True)),
        'tenant': JLeaf(str),
        'user_id': JLeaf(str),
        'uid': JLeaf(str)
    }, allow_unknown=True))
    self.assertGreaterEqual(len(data['keys']), 1)
def test_ecp_info(self):
    self._get('/ui-api/erasure_code_profile/info')
    self.assertSchemaBody(JObj({
        'names': JList(six.string_types),
        'failure_domains': JList(six.string_types),
        'plugins': JList(six.string_types),
        'devices': JList(six.string_types),
        'directory': six.string_types,
    }))
def test_get_setting(self):
    setting = self._get('/api/settings/rgw-api-access-key')
    self.assertSchema(setting, JObj({
        'default': JAny(none=False),
        'name': str,
        'type': str,
        'value': JAny(none=False)
    }))
    self.assertStatus(200)
def test_perf_counters_not_found(self):
    osds = self.ceph_cluster.mon_manager.get_osd_dump()
    # Pick an OSD id that is guaranteed not to exist: one above the
    # highest id currently present in the OSD map.
    unused_id = max(int(o['osd']) for o in osds) + 1

    self._get('/api/perf_counters/osd/{}'.format(unused_id))
    self.assertStatus(404)
    schema = JObj(sub_elems={
        'status': str,
        'detail': str,
    }, allow_unknown=True)
    self.assertEqual(self._resp.json()['detail'],
                     "'osd.{}' not found".format(unused_id))
    self.assertSchemaBody(schema)
def test_list_settings(self):
    settings = self._get('/api/settings')
    self.assertGreater(len(settings), 10)
    self.assertSchema(settings, JList(JObj({
        'default': JAny(none=False),
        'name': str,
        'type': str,
        'value': JAny(none=False)
    })))
    self.assertStatus(200)
def test_list_enabled_module(self):
    self._ceph_cmd(['mgr', 'module', 'enable', 'iostat'])
    self.wait_until_rest_api_accessible()
    data = self._get('/api/mgr/module')
    self.assertStatus(200)
    self.assertSchema(data, JList(JObj(sub_elems={
        'name': JLeaf(str),
        'enabled': JLeaf(bool),
        'always_on': JLeaf(bool),
        'options': JObj({}, allow_unknown=True, unknown_schema=JObj({
            'name': str,
            'type': str,
            'level': str,
            'flags': int,
            'default_value': JAny(none=False),
            'min': JAny(none=False),
            'max': JAny(none=False),
            'enum_allowed': JList(str),
            'see_also': JList(str),
            'desc': str,
            'long_desc': str,
            'tags': JList(str)
        }))
    })))
    module_info = self.find_object_in_list('name', 'iostat', data)
    self.assertIsNotNone(module_info)
    self.assertTrue(module_info['enabled'])
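# For reference, a minimal sketch of what `find_object_in_list` is assumed
# to do (the real helper lives on the test base class and is not shown in
# this excerpt):
#
#   @staticmethod
#   def find_object_in_list(key, value, objects):
#       # Return the first dict in `objects` whose `key` equals `value`,
#       # or None if no such dict exists.
#       for obj in objects:
#           if key in obj and obj[key] == value:
#               return obj
#       return None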
def test_crud_w_locking(self):
    # Create
    self._post('/api/rgw/bucket', params={
        'bucket': 'teuth-test-bucket',
        'uid': 'teuth-test-user',
        'zonegroup': 'default',
        'placement_target': 'default-placement',
        'lock_enabled': 'true',
        'lock_mode': 'GOVERNANCE',
        'lock_retention_period_days': '0',
        'lock_retention_period_years': '1'
    })
    self.assertStatus(201)

    # Read
    data = self._get('/api/rgw/bucket/teuth-test-bucket')
    self.assertStatus(200)
    self.assertSchema(data, JObj(sub_elems={
        'lock_enabled': JLeaf(bool),
        'lock_mode': JLeaf(str),
        'lock_retention_period_days': JLeaf(int),
        'lock_retention_period_years': JLeaf(int)
    }, allow_unknown=True))
    self.assertTrue(data['lock_enabled'])
    self.assertEqual(data['lock_mode'], 'GOVERNANCE')
    self.assertEqual(data['lock_retention_period_days'], 0)
    self.assertEqual(data['lock_retention_period_years'], 1)

    # Update
    self._put('/api/rgw/bucket/teuth-test-bucket', params={
        'bucket_id': data['id'],
        'uid': 'teuth-test-user',
        'lock_mode': 'COMPLIANCE',
        'lock_retention_period_days': '15',
        'lock_retention_period_years': '0'
    })
    self.assertStatus(200)
    data = self._get('/api/rgw/bucket/teuth-test-bucket')
    self.assertTrue(data['lock_enabled'])
    self.assertEqual(data['lock_mode'], 'COMPLIANCE')
    self.assertEqual(data['lock_retention_period_days'], 15)
    self.assertEqual(data['lock_retention_period_years'], 0)
    self.assertStatus(200)

    # Delete
    self._delete('/api/rgw/bucket/teuth-test-bucket')
    self.assertStatus(204)
def test_logs(self):
    data = self._get("/api/logs/all")
    self.assertStatus(200)
    log_entry_schema = JList(JObj({
        'addrs': JObj({
            'addrvec': JList(JObj({
                'addr': str,
                'nonce': int,
                'type': str
            }))
        }),
        'channel': str,
        'message': str,
        'name': str,
        'priority': str,
        'rank': str,
        'seq': int,
        'stamp': str
    }))
    schema = JObj({
        'audit_log': log_entry_schema,
        'clog': log_entry_schema
    })
    self.assertSchema(data, schema)
def _verify_tenant_bucket(bucket, tenant, uid):
    # Nested helper: intended to be defined inside a test method, where it
    # closes over `self` (hence no `self` parameter).
    full_bucket_name = '{}/{}'.format(tenant, bucket)
    _data = self._get('/api/rgw/bucket/{}'.format(
        urllib.quote_plus(full_bucket_name)))
    self.assertStatus(200)
    self.assertSchema(_data, JObj(sub_elems={
        'owner': JLeaf(str),
        'bucket': JLeaf(str),
        'tenant': JLeaf(str),
        'bid': JLeaf(str)
    }, allow_unknown=True))
    self.assertEqual(_data['owner'], '{}${}'.format(tenant, uid))
    self.assertEqual(_data['bucket'], bucket)
    self.assertEqual(_data['tenant'], tenant)
    self.assertEqual(_data['bid'], full_bucket_name)
    return _data
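# A hypothetical call, assuming an RGW user 'teuth-test-user' under tenant
# 'testx' owns a bucket named 'teuth-test-bucket': the helper then asserts
# that the owner is reported as 'testx$teuth-test-user' and the bucket id
# as 'testx/teuth-test-bucket'.
#
#   _verify_tenant_bucket('teuth-test-bucket', 'testx', 'teuth-test-user')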
def test_get(self):
    data = self._get('/api/mgr/module/telemetry')
    self.assertStatus(200)
    self.assertSchema(data, JObj(allow_unknown=True, sub_elems={
        'channel_basic': bool,
        'channel_ident': bool,
        'channel_crash': bool,
        'channel_device': bool,
        'contact': str,
        'description': str,
        'enabled': bool,
        'interval': int,
        'last_opt_revision': int,
        'leaderboard': bool,
        'organization': str,
        'proxy': str,
        'url': str
    }))
class CrushRuleTest(DashboardTestCase):

    AUTH_ROLES = ['pool-manager']

    rule_schema = JObj(sub_elems={
        'max_size': int,
        'min_size': int,
        'rule_id': int,
        'rule_name': six.string_types,
        'ruleset': int,
        'steps': JList(JObj({}, allow_unknown=True))
    }, allow_unknown=True)

    def create_and_delete_rule(self, data):
        name = data['name']
        # Creates rule
        self._post('/api/crush_rule', data)
        self.assertStatus(201)
        # Makes sure rule exists
        rule = self._get('/api/crush_rule/{}'.format(name))
        self.assertStatus(200)
        self.assertSchemaBody(self.rule_schema)
        self.assertEqual(rule['rule_name'], name)
        # Deletes rule
        self._delete('/api/crush_rule/{}'.format(name))
        self.assertStatus(204)

    @DashboardTestCase.RunAs('test', 'test', ['rgw-manager'])
    def test_read_access_permissions(self):
        self._get('/api/crush_rule')
        self.assertStatus(403)

    @DashboardTestCase.RunAs('test', 'test', ['read-only'])
    def test_write_access_permissions(self):
        self._get('/api/crush_rule')
        self.assertStatus(200)
        data = {
            'name': 'some_rule',
            'root': 'default',
            'failure_domain': 'osd'
        }
        self._post('/api/crush_rule', data)
        self.assertStatus(403)
        self._delete('/api/crush_rule/default')
        self.assertStatus(403)

    @classmethod
    def tearDownClass(cls):
        super(CrushRuleTest, cls).tearDownClass()
        cls._ceph_cmd(['osd', 'crush', 'rule', 'rm', 'some_rule'])
        cls._ceph_cmd(['osd', 'crush', 'rule', 'rm', 'another_rule'])

    def test_list(self):
        self._get('/api/crush_rule')
        self.assertStatus(200)
        self.assertSchemaBody(JList(self.rule_schema))

    def test_create(self):
        self.create_and_delete_rule({
            'name': 'some_rule',
            'root': 'default',
            'failure_domain': 'osd'
        })

    @DashboardTestCase.RunAs('test', 'test', ['pool-manager', 'cluster-manager'])
    def test_create_with_ssd(self):
        data = self._get('/api/osd/0')
        self.assertStatus(200)
        device_class = data['osd_metadata']['default_device_class']
        self.create_and_delete_rule({
            'name': 'another_rule',
            'root': 'default',
            'failure_domain': 'osd',
            'device_class': device_class
        })

    def test_crush_rule_info(self):
        self._get('/ui-api/crush_rule/info')
        self.assertStatus(200)
        self.assertSchemaBody(JObj({
            'names': JList(six.string_types),
            'nodes': JList(JObj({}, allow_unknown=True))
        }))
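# The `DashboardTestCase.RunAs(username, password, roles)` decorator used
# above is assumed to create a temporary dashboard user with the given
# roles, run the test while logged in as that user, and restore the default
# credentials afterwards; that is what makes the 403/200 permission
# assertions meaningful. A hypothetical usage:
#
#   @DashboardTestCase.RunAs('test', 'test', ['read-only'])
#   def test_read_only_user_cannot_create_rule(self):
#       self._post('/api/crush_rule', {'name': 'r', 'root': 'default',
#                                      'failure_domain': 'osd'})
#       self.assertStatus(403)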
def test_all(self):
    # Create a new bucket.
    self._post('/api/rgw/bucket', params={
        'bucket': 'teuth-test-bucket',
        'uid': 'admin',
        'zonegroup': 'default',
        'placement_target': 'default-placement'
    })
    self.assertStatus(201)
    data = self.jsonBody()
    self.assertSchema(data, JObj(sub_elems={
        'bucket_info': JObj(sub_elems={
            'bucket': JObj(allow_unknown=True, sub_elems={
                'name': JLeaf(str),
                'bucket_id': JLeaf(str),
                'tenant': JLeaf(str)
            }),
            'quota': JObj(sub_elems={}, allow_unknown=True),
            'creation_time': JLeaf(str)
        }, allow_unknown=True)
    }, allow_unknown=True))
    data = data['bucket_info']['bucket']
    self.assertEqual(data['name'], 'teuth-test-bucket')
    self.assertEqual(data['tenant'], '')

    # List all buckets.
    data = self._get('/api/rgw/bucket')
    self.assertStatus(200)
    self.assertEqual(len(data), 1)
    self.assertIn('teuth-test-bucket', data)

    # Get the bucket.
    data = self._get('/api/rgw/bucket/teuth-test-bucket')
    self.assertStatus(200)
    self.assertSchema(data, JObj(sub_elems={
        'id': JLeaf(str),
        'bid': JLeaf(str),
        'tenant': JLeaf(str),
        'bucket': JLeaf(str),
        'bucket_quota': JObj(sub_elems={}, allow_unknown=True),
        'owner': JLeaf(str)
    }, allow_unknown=True))
    self.assertEqual(data['bucket'], 'teuth-test-bucket')
    self.assertEqual(data['owner'], 'admin')
    self.assertEqual(data['placement_rule'], 'default-placement')
    self.assertEqual(data['versioning'], 'Suspended')

    # Update bucket: change owner, enable versioning.
    self._put('/api/rgw/bucket/teuth-test-bucket', params={
        'bucket_id': data['id'],
        'uid': 'teuth-test-user',
        'versioning_state': 'Enabled'
    })
    self.assertStatus(200)
    data = self._get('/api/rgw/bucket/teuth-test-bucket')
    self.assertStatus(200)
    self.assertSchema(data, JObj(sub_elems={
        'owner': JLeaf(str),
        'bid': JLeaf(str),
        'tenant': JLeaf(str)
    }, allow_unknown=True))
    self.assertEqual(data['owner'], 'teuth-test-user')
    self.assertEqual(data['versioning'], 'Enabled')

    # Update bucket: enable MFA Delete.
    self._put('/api/rgw/bucket/teuth-test-bucket', params={
        'bucket_id': data['id'],
        'uid': 'teuth-test-user',
        'versioning_state': 'Enabled',
        'mfa_delete': 'Enabled',
        'mfa_token_serial': self._mfa_token_serial,
        'mfa_token_pin': self._get_mfa_token_pin()
    })
    self.assertStatus(200)
    data = self._get('/api/rgw/bucket/teuth-test-bucket')
    self.assertStatus(200)
    self.assertEqual(data['versioning'], 'Enabled')
    self.assertEqual(data['mfa_delete'], 'Enabled')

    # Update bucket: disable versioning & MFA Delete.
    time.sleep(self._mfa_token_time_step + 2)  # Required to get new TOTP pin.
    self._put('/api/rgw/bucket/teuth-test-bucket', params={
        'bucket_id': data['id'],
        'uid': 'teuth-test-user',
        'versioning_state': 'Suspended',
        'mfa_delete': 'Disabled',
        'mfa_token_serial': self._mfa_token_serial,
        'mfa_token_pin': self._get_mfa_token_pin()
    })
    self.assertStatus(200)
    data = self._get('/api/rgw/bucket/teuth-test-bucket')
    self.assertStatus(200)
    self.assertEqual(data['versioning'], 'Suspended')
    self.assertEqual(data['mfa_delete'], 'Disabled')

    # Delete the bucket.
    self._delete('/api/rgw/bucket/teuth-test-bucket')
    self.assertStatus(204)
    data = self._get('/api/rgw/bucket')
    self.assertStatus(200)
    self.assertEqual(len(data), 0)
def assert_in_and_not_none(self, data, properties):
    self.assertSchema(data, JObj(
        {p: JAny(none=False) for p in properties}, allow_unknown=True))
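# A minimal usage sketch with a hypothetical payload: every listed property
# must be present and not None, while any extra keys are ignored.
#
#   data = {'id': 'abc', 'name': 'osd.0', 'extra': 1}
#   self.assert_in_and_not_none(data, ['id', 'name'])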
class PoolTest(DashboardTestCase):

    AUTH_ROLES = ['pool-manager']

    pool_schema = JObj(sub_elems={
        'pool_name': str,
        'type': str,
        'application_metadata': JList(str),
        'flags': int,
        'flags_names': str,
    }, allow_unknown=True)

    pool_list_stat_schema = JObj(sub_elems={
        'latest': int,
        'rate': float,
        'rates': JList(JAny(none=False)),
    })

    pool_list_stats_schema = JObj(sub_elems={
        'bytes_used': pool_list_stat_schema,
        'max_avail': pool_list_stat_schema,
        'rd_bytes': pool_list_stat_schema,
        'wr_bytes': pool_list_stat_schema,
        'rd': pool_list_stat_schema,
        'wr': pool_list_stat_schema,
    }, allow_unknown=True)

    pool_rbd_conf_schema = JList(JObj(sub_elems={
        'name': str,
        'value': str,
        'source': int
    }))

    @contextmanager
    def __yield_pool(self, name=None, data=None, deletion_name=None):
        """
        Use either just a name or a whole description of a pool to create
        one. This also validates the correct creation and deletion after
        the pool was used.

        :param name: Name of the pool
        :param data: Describes the pool in full length
        :param deletion_name: Only needed if the pool was renamed
        :return:
        """
        data = self._create_pool(name, data)
        yield data
        self._delete_pool(deletion_name or data['pool'])

    def _create_pool(self, name, data):
        data = data or {
            'pool': name,
            'pg_num': '32',
            'pool_type': 'replicated',
            'compression_algorithm': 'snappy',
            'compression_mode': 'passive',
            'compression_max_blob_size': '131072',
            'compression_required_ratio': '0.875',
            'application_metadata': ['rbd'],
            'configuration': {
                'rbd_qos_bps_limit': 1024000,
                'rbd_qos_iops_limit': 5000,
            }
        }
        self._task_post('/api/pool/', data)
        self.assertStatus(201)
        self._validate_pool_properties(data, self._get_pool(data['pool']))
        return data

    def _delete_pool(self, name):
        self._task_delete('/api/pool/' + name)
        self.assertStatus(204)

    def _validate_pool_properties(self, data, pool):
        for prop, value in data.items():
            if prop == 'pool_type':
                self.assertEqual(pool['type'], value)
            elif prop == 'size':
                self.assertEqual(pool[prop], int(value),
                                 '{}: {} != {}'.format(prop, pool[prop], value))
            elif prop == 'pg_num':
                self._check_pg_num(value, pool)
            elif prop == 'application_metadata':
                self.assertIsInstance(pool[prop], list)
                self.assertEqual(value, pool[prop])
            elif prop == 'pool':
                self.assertEqual(pool['pool_name'], value)
            elif prop.startswith('compression'):
                if value is not None:
                    if prop.endswith('size'):
                        value = int(value)
                    elif prop.endswith('ratio'):
                        value = float(value)
                    self.assertEqual(pool['options'][prop], value)
                else:
                    self.assertEqual(pool['options'], {})
            elif prop == 'configuration':
                # configuration cannot really be checked here for two reasons:
                #   1. The default value cannot be given to this method, which
                #      becomes relevant when resetting a value, because it's
                #      not always zero.
                #   2. The expected `source` cannot be given to this method,
                #      and it cannot reliably be determined (see 1)
                pass
            else:
                self.assertEqual(pool[prop], value,
                                 '{}: {} != {}'.format(prop, pool[prop], value))
        health = self._get('/api/health/minimal')['health']
        self.assertEqual(health['status'], 'HEALTH_OK',
                         msg='health={}'.format(health))

    def _get_pool(self, pool_name):
        pool = self._get("/api/pool/" + pool_name)
        self.assertStatus(200)
        self.assertSchemaBody(self.pool_schema)
        return pool

    def _check_pg_num(self, value, pool):
        """
        If the two properties do not have the same value, the cluster goes
        into a warning state, which will only happen during a pg update on
        an existing pool. The test that does that is currently commented
        out because our QA systems can't deal with the change. Feel free to
        test it locally.
        """
        pgp_prop = 'pg_placement_num'
        t = 0
        while (int(value) != pool[pgp_prop]
               or self._get('/api/health/minimal')['health']['status']
               != 'HEALTH_OK') and t < 180:
            time.sleep(2)
            t += 2
            pool = self._get_pool(pool['pool_name'])
        for p in ['pg_num', pgp_prop]:  # Should have the same values
            self.assertEqual(pool[p], int(value),
                             '{}: {} != {}'.format(p, pool[p], value))

    @DashboardTestCase.RunAs('test', 'test',
                             [{'pool': ['create', 'update', 'delete']}])
    def test_read_access_permissions(self):
        self._get('/api/pool')
        self.assertStatus(403)
        self._get('/api/pool/bla')
        self.assertStatus(403)

    @DashboardTestCase.RunAs('test', 'test',
                             [{'pool': ['read', 'update', 'delete']}])
    def test_create_access_permissions(self):
        self._task_post('/api/pool/', {})
        self.assertStatus(403)

    @DashboardTestCase.RunAs('test', 'test',
                             [{'pool': ['read', 'create', 'update']}])
    def test_delete_access_permissions(self):
        self._delete('/api/pool/ddd')
        self.assertStatus(403)

    def test_pool_list(self):
        data = self._get("/api/pool")
        self.assertStatus(200)
        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data))
        self.assertSchemaBody(JList(self.pool_schema))
        for pool in data:
            self.assertNotIn('pg_status', pool)
            self.assertNotIn('stats', pool)
            self.assertIn(pool['pool_name'], cluster_pools)

    def test_pool_list_attrs(self):
        data = self._get("/api/pool?attrs=type,flags")
        self.assertStatus(200)
        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data))
        for pool in data:
            self.assertIn('pool_name', pool)
            self.assertIn('type', pool)
            self.assertIn('flags', pool)
            self.assertNotIn('flags_names', pool)
            self.assertNotIn('pg_status', pool)
            self.assertNotIn('stats', pool)
            self.assertIn(pool['pool_name'], cluster_pools)

    def test_pool_list_stats(self):
        data = self._get("/api/pool?stats=true")
        self.assertStatus(200)
        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data))
        self.assertSchemaBody(JList(self.pool_schema))
        for pool in data:
            self.assertIn('pool_name', pool)
            self.assertIn('type', pool)
            self.assertIn('application_metadata', pool)
            self.assertIn('flags', pool)
            self.assertIn('pg_status', pool)
            self.assertSchema(pool['stats'], self.pool_list_stats_schema)
            self.assertIn('flags_names', pool)
            self.assertIn(pool['pool_name'], cluster_pools)

    def test_pool_get(self):
        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        pool = self._get("/api/pool/{}?stats=true&attrs=type,flags,stats"
                         .format(cluster_pools[0]))
        self.assertEqual(pool['pool_name'], cluster_pools[0])
        self.assertIn('type', pool)
        self.assertIn('flags', pool)
        self.assertNotIn('pg_status', pool)
        self.assertSchema(pool['stats'], self.pool_list_stats_schema)
        self.assertNotIn('flags_names', pool)
        self.assertSchema(pool['configuration'], self.pool_rbd_conf_schema)

    def test_pool_create_with_two_applications(self):
        # Entering the context creates and validates the pool; leaving it
        # deletes the pool again.
        with self.__yield_pool(None, {
                'pool': 'dashboard_pool1',
                'pg_num': '32',
                'pool_type': 'replicated',
                'application_metadata': ['rbd', 'sth'],
        }):
            pass

    def test_pool_create_with_ecp_and_rule(self):
        self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
        self._ceph_cmd(['osd', 'erasure-code-profile', 'set', 'ecprofile',
                        'crush-failure-domain=osd'])
        with self.__yield_pool(None, {
                'pool': 'dashboard_pool2',
                'pg_num': '32',
                'pool_type': 'erasure',
                'application_metadata': ['rbd'],
                'erasure_code_profile': 'ecprofile',
                'crush_rule': 'ecrule',
        }):
            pass
        self._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])

    def test_pool_create_with_compression(self):
        pool = {
            'pool': 'dashboard_pool3',
            'pg_num': '32',
            'pool_type': 'replicated',
            'compression_algorithm': 'zstd',
            'compression_mode': 'aggressive',
            'compression_max_blob_size': '10000000',
            'compression_required_ratio': '0.8',
            'configuration': {
                'rbd_qos_bps_limit': 2048,
                'rbd_qos_iops_limit': None,
            },
        }
        with self.__yield_pool(None, pool):
            expected_configuration = [{
                'name': 'rbd_qos_bps_limit',
                'source': 1,
                'value': '2048',
            }, {
                'name': 'rbd_qos_iops_limit',
                'source': 0,
                'value': '0',
            }]
            new_pool = self._get_pool(pool['pool'])
            for conf in expected_configuration:
                self.assertIn(conf, new_pool['configuration'])

    def test_pool_create_with_quotas(self):
        pools = [{
            'pool_data': {
                'pool': 'dashboard_pool_quota1',
                'pg_num': '32',
                'pool_type': 'replicated',
            },
            'pool_quotas_to_check': {
                'quota_max_objects': 0,
                'quota_max_bytes': 0,
            }
        }, {
            'pool_data': {
                'pool': 'dashboard_pool_quota2',
                'pg_num': '32',
                'pool_type': 'replicated',
                'quota_max_objects': 1024,
                'quota_max_bytes': 1000,
            },
            'pool_quotas_to_check': {
                'quota_max_objects': 1024,
                'quota_max_bytes': 1000,
            }
        }]

        for pool in pools:
            pool_name = pool['pool_data']['pool']
            with self.__yield_pool(pool_name, pool['pool_data']):
                self._validate_pool_properties(pool['pool_quotas_to_check'],
                                               self._get_pool(pool_name))

    def test_pool_update_name(self):
        name = 'pool_update'
        updated_name = 'pool_updated_name'
        with self.__yield_pool(name, None, updated_name):
            props = {'pool': updated_name}
            self._task_put('/api/pool/{}'.format(name), props)
            time.sleep(5)
            self.assertStatus(200)
            self._validate_pool_properties(props, self._get_pool(updated_name))

    def test_pool_update_metadata(self):
        pool_name = 'pool_update_metadata'
        with self.__yield_pool(pool_name):
            props = {'application_metadata': ['rbd', 'sth']}
            self._task_put('/api/pool/{}'.format(pool_name), props)
            time.sleep(5)
            self._validate_pool_properties(props, self._get_pool(pool_name))

            properties = {'application_metadata': ['rgw']}
            self._task_put('/api/pool/' + pool_name, properties)
            time.sleep(5)
            self._validate_pool_properties(properties,
                                           self._get_pool(pool_name))

            properties = {'application_metadata': ['rbd', 'sth']}
            self._task_put('/api/pool/' + pool_name, properties)
            time.sleep(5)
            self._validate_pool_properties(properties,
                                           self._get_pool(pool_name))

            properties = {'application_metadata': ['rgw']}
            self._task_put('/api/pool/' + pool_name, properties)
            time.sleep(5)
            self._validate_pool_properties(properties,
                                           self._get_pool(pool_name))

    def test_pool_update_configuration(self):
        pool_name = 'pool_update_configuration'
        with self.__yield_pool(pool_name):
            configuration = {
                'rbd_qos_bps_limit': 1024,
                'rbd_qos_iops_limit': None,
            }
            expected_configuration = [{
                'name': 'rbd_qos_bps_limit',
                'source': 1,
                'value': '1024',
            }, {
                'name': 'rbd_qos_iops_limit',
                'source': 0,
                'value': '0',
            }]
            self._task_put('/api/pool/' + pool_name,
                           {'configuration': configuration})
            time.sleep(5)
            pool_config = self._get_pool(pool_name)['configuration']
            for conf in expected_configuration:
                self.assertIn(conf, pool_config)

    def test_pool_update_compression(self):
        pool_name = 'pool_update_compression'
        with self.__yield_pool(pool_name):
            properties = {
                'compression_algorithm': 'zstd',
                'compression_mode': 'aggressive',
                'compression_max_blob_size': '10000000',
                'compression_required_ratio': '0.8',
            }
            self._task_put('/api/pool/' + pool_name, properties)
            time.sleep(5)
            self._validate_pool_properties(properties,
                                           self._get_pool(pool_name))

    def test_pool_update_unset_compression(self):
        pool_name = 'pool_update_unset_compression'
        with self.__yield_pool(pool_name):
            self._task_put('/api/pool/' + pool_name,
                           {'compression_mode': 'unset'})
            time.sleep(5)
            self._validate_pool_properties({
                'compression_algorithm': None,
                'compression_mode': None,
                'compression_max_blob_size': None,
                'compression_required_ratio': None,
            }, self._get_pool(pool_name))

    def test_pool_update_quotas(self):
        pool_name = 'pool_update_quotas'
        with self.__yield_pool(pool_name):
            properties = {
                'quota_max_objects': 1024,
                'quota_max_bytes': 1000,
            }
            self._task_put('/api/pool/' + pool_name, properties)
            time.sleep(5)
            self._validate_pool_properties(properties,
                                           self._get_pool(pool_name))

    def test_pool_create_fail(self):
        data = {
            'pool_type': u'replicated',
            'rule_name': u'dnf',
            'pg_num': u'8',
            'pool': u'sadfs'
        }
        self._task_post('/api/pool/', data)
        self.assertStatus(400)
        self.assertJsonBody({
            'component': 'pool',
            'code': "2",
            'detail': "[errno -2] specified rule dnf doesn't exist"
        })

    def test_pool_info(self):
        self._get("/ui-api/pool/info")
        self.assertSchemaBody(JObj({
            'pool_names': JList(six.string_types),
            'compression_algorithms': JList(six.string_types),
            'compression_modes': JList(six.string_types),
            'is_all_bluestore': bool,
            'bluestore_compression_algorithm': six.string_types,
            'osd_count': int,
            'crush_rules_replicated': JList(JObj({}, allow_unknown=True)),
            'crush_rules_erasure': JList(JObj({}, allow_unknown=True)),
            'pg_autoscale_default_mode': six.string_types,
            'pg_autoscale_modes': JList(six.string_types),
            'erasure_code_profiles': JList(JObj({}, allow_unknown=True)),
            'used_rules': JObj({}, allow_unknown=True),
        }))
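# A minimal sketch of the __yield_pool pattern used throughout PoolTest
# (the pool name is hypothetical): entering the context creates and
# validates the pool, leaving it deletes the pool again, so test bodies
# only need to contain the assertions they actually care about.
#
#   with self.__yield_pool('dashboard_pool_example') as pool:
#       self.assertEqual(pool['pool'], 'dashboard_pool_example')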
class HealthTest(DashboardTestCase):

    CEPHFS = True

    __pg_info_schema = JObj({
        'object_stats': JObj({
            'num_objects': int,
            'num_object_copies': int,
            'num_objects_degraded': int,
            'num_objects_misplaced': int,
            'num_objects_unfound': int
        }),
        'pgs_per_osd': float,
        'statuses': JObj({}, allow_unknown=True, unknown_schema=int)
    })

    __mdsmap_schema = JObj({
        'session_autoclose': int,
        'balancer': str,
        'up': JObj({}, allow_unknown=True),
        'last_failure_osd_epoch': int,
        'in': JList(int),
        'last_failure': int,
        'max_file_size': int,
        'explicitly_allowed_features': int,
        'damaged': JList(int),
        'tableserver': int,
        'failed': JList(int),
        'metadata_pool': int,
        'epoch': int,
        'stopped': JList(int),
        'max_mds': int,
        'compat': JObj({
            'compat': JObj({}, allow_unknown=True),
            'ro_compat': JObj({}, allow_unknown=True),
            'incompat': JObj({}, allow_unknown=True)
        }),
        'min_compat_client': str,
        'data_pools': JList(int),
        'info': JObj({}, allow_unknown=True),
        'fs_name': str,
        'created': str,
        'standby_count_wanted': int,
        'enabled': bool,
        'modified': str,
        'session_timeout': int,
        'flags': int,
        'ever_allowed_features': int,
        'root': int
    })

    def test_minimal_health(self):
        data = self._get('/api/health/minimal')
        self.assertStatus(200)
        schema = JObj({
            'client_perf': JObj({
                'read_bytes_sec': int,
                'read_op_per_sec': int,
                'recovering_bytes_per_sec': int,
                'write_bytes_sec': int,
                'write_op_per_sec': int
            }),
            'df': JObj({
                'stats': JObj({
                    'total_avail_bytes': int,
                    'total_bytes': int,
                    'total_used_raw_bytes': int,
                })
            }),
            'fs_map': JObj({
                'filesystems': JList(JObj({'mdsmap': self.__mdsmap_schema})),
                'standbys': JList(JObj({}, allow_unknown=True)),
            }),
            'health': JObj({
                'checks': JList(JObj({}, allow_unknown=True)),
                'mutes': JList(JObj({}, allow_unknown=True)),
                'status': str,
            }),
            'hosts': int,
            'iscsi_daemons': JObj({
                'up': int,
                'down': int
            }),
            'mgr_map': JObj({
                'active_name': str,
                'standbys': JList(JLeaf(dict))
            }),
            'mon_status': JObj({
                'monmap': JObj({
                    'mons': JList(JLeaf(dict)),
                }),
                'quorum': JList(int)
            }),
            'osd_map': JObj({
                'osds': JList(JObj({
                    'in': int,
                    'up': int,
                })),
            }),
            'pg_info': self.__pg_info_schema,
            'pools': JList(JLeaf(dict)),
            'rgw': int,
            'scrub_status': str
        })
        self.assertSchema(data, schema)

    def test_full_health(self):
        data = self._get('/api/health/full')
        self.assertStatus(200)
        module_info_schema = JObj({
            'can_run': bool,
            'error_string': str,
            'name': str,
            'module_options': JObj(
                {},
                allow_unknown=True,
                unknown_schema=JObj({
                    'name': str,
                    'type': str,
                    'level': str,
                    'flags': int,
                    'default_value': str,
                    'min': str,
                    'max': str,
                    'enum_allowed': JList(str),
                    'see_also': JList(str),
                    'desc': str,
                    'long_desc': str,
                    'tags': JList(str),
                })),
        })
        schema = JObj({
            'client_perf': JObj({
                'read_bytes_sec': int,
                'read_op_per_sec': int,
                'recovering_bytes_per_sec': int,
                'write_bytes_sec': int,
                'write_op_per_sec': int
            }),
            'df': JObj({
                'pools': JList(JObj({
                    'stats': JObj({
                        'stored': int,
                        'stored_data': int,
                        'stored_omap': int,
                        'objects': int,
                        'kb_used': int,
                        'bytes_used': int,
                        'data_bytes_used': int,
                        'omap_bytes_used': int,
                        'percent_used': float,
                        'max_avail': int,
                        'quota_objects': int,
                        'quota_bytes': int,
                        'dirty': int,
                        'rd': int,
                        'rd_bytes': int,
                        'wr': int,
                        'wr_bytes': int,
                        'compress_bytes_used': int,
                        'compress_under_bytes': int,
                        'stored_raw': int
                    }),
                    'name': str,
                    'id': int
                })),
                'stats': JObj({
                    'total_avail_bytes': int,
                    'total_bytes': int,
                    'total_used_bytes': int,
                    'total_used_raw_bytes': int,
                    'total_used_raw_ratio': float,
                    'num_osds': int,
                    'num_per_pool_osds': int,
                    'num_per_pool_omap_osds': int
                })
            }),
            'fs_map': JObj({
                'compat': JObj({
                    'compat': JObj({}, allow_unknown=True, unknown_schema=str),
                    'incompat': JObj({}, allow_unknown=True,
                                     unknown_schema=str),
                    'ro_compat': JObj({}, allow_unknown=True,
                                      unknown_schema=str)
                }),
                'default_fscid': int,
                'epoch': int,
                'feature_flags': JObj({}, allow_unknown=True,
                                      unknown_schema=bool),
                'filesystems': JList(JObj({
                    'id': int,
                    'mdsmap': self.__mdsmap_schema
                })),
                'standbys': JList(JObj({}, allow_unknown=True)),
            }),
            'health': JObj({
                'checks': JList(JObj({}, allow_unknown=True)),
                'mutes': JList(JObj({}, allow_unknown=True)),
                'status': str,
            }),
            'hosts': int,
            'iscsi_daemons': JObj({
                'up': int,
                'down': int
            }),
            'mgr_map': JObj({
                'active_addr': str,
                'active_addrs': JObj({
                    'addrvec': JList(JObj({
                        'addr': str,
                        'nonce': int,
                        'type': str
                    }))
                }),
                'active_change': str,  # timestamp
                'active_mgr_features': int,
                'active_gid': int,
                'active_name': str,
                'always_on_modules': JObj({}, allow_unknown=True),
                'available': bool,
                'available_modules': JList(module_info_schema),
                'epoch': int,
                'modules': JList(str),
                'services': JObj(
                    {'dashboard': str},  # This module should always be present
                    allow_unknown=True, unknown_schema=str),
                'standbys': JList(JObj({
                    'available_modules': JList(module_info_schema),
                    'gid': int,
                    'name': str,
                    'mgr_features': int
                }, allow_unknown=True))
            }, allow_unknown=True),
            'mon_status': JObj({
                'election_epoch': int,
                'extra_probe_peers': JList(JAny(none=True)),
                'feature_map': JObj(
                    {},
                    allow_unknown=True,
                    unknown_schema=JList(JObj({
                        'features': str,
                        'num': int,
                        'release': str
                    }))),
                'features': JObj({
                    'quorum_con': str,
                    'quorum_mon': JList(str),
                    'required_con': str,
                    'required_mon': JList(str)
                }),
                'monmap': JObj({
                    # TODO: expand on monmap schema
                    'mons': JList(JLeaf(dict)),
                }, allow_unknown=True),
                'name': str,
                'outside_quorum': JList(int),
                'quorum': JList(int),
                'quorum_age': int,
                'rank': int,
                'state': str,
                # TODO: What type should be expected here?
                'sync_provider': JList(JAny(none=True))
            }),
            'osd_map': JObj({
                # TODO: define schema for crush map and osd_metadata, among
                # others
                'osds': JList(JObj({
                    'in': int,
                    'up': int,
                }, allow_unknown=True)),
            }, allow_unknown=True),
            'pg_info': self.__pg_info_schema,
            'pools': JList(JLeaf(dict)),
            'rgw': int,
            'scrub_status': str
        })
        self.assertSchema(data, schema)

        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data['pools']))
        for pool in data['pools']:
            self.assertIn(pool['pool_name'], cluster_pools)

    @DashboardTestCase.RunAs('test', 'test', ['pool-manager'])
    def test_health_permissions(self):
        data = self._get('/api/health/full')
        self.assertStatus(200)
        schema = JObj({
            'client_perf': JObj({}, allow_unknown=True),
            'df': JObj({}, allow_unknown=True),
            'health': JObj({
                'checks': JList(JObj({}, allow_unknown=True)),
                'mutes': JList(JObj({}, allow_unknown=True)),
                'status': str
            }),
            'pools': JList(JLeaf(dict)),
        })
        self.assertSchema(data, schema)

        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data['pools']))
        for pool in data['pools']:
            self.assertIn(pool['pool_name'], cluster_pools)