def _create_disk(self, ihost_id):
    """Create and return a test disk record (/dev/sda) for *ihost_id*.

    The disk is created at ``self.disk_device_path`` with 256 MiB
    reported as available.
    """
    disk_attrs = {
        'device_node': '/dev/sda',
        'device_path': self.disk_device_path,
        'available_mib': 256,
        'forihostid': ihost_id,
    }
    return dbutils.create_test_idisk(**disk_attrs)
def test_cluster_tier_host_osd(self):
    """Exercise the storage-tier lifecycle on a storage host end to end.

    Walks through: ceph cluster discovery via mocked ``fsid``, the
    default tier appearing, provisioning the default ceph backend,
    assigning an OSD stor to the default tier, creating a second
    ('gold') tier, assigning a stor to it, and finally provisioning a
    second ceph backend bound to that tier — checking the REST views of
    tiers, clusters and backends at each stage.
    """
    # Provision a storage host with two disks and a ceph monitor.
    storage_0 = self._create_storage_ihost('storage-0')
    disk_0 = dbutils.create_test_idisk(
        device_node='/dev/sda',
        device_path='/dev/disk/by-path/pci-0000:00:0d.0-ata-1.0',
        forihostid=storage_0.id)
    disk_1 = dbutils.create_test_idisk(
        device_node='/dev/sdb',
        device_path='/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0',
        forihostid=storage_0.id)
    self._create_storage_mon('storage-0', storage_0['id'])

    # Mock the fsid call so that we don't have to wait for the timeout
    with nested(mock.patch.object(ceph.CephWrapper, 'fsid'),
                mock.patch.object(ceph_utils, 'fix_crushmap')) as (
                    mock_fsid, mock_fix_crushmap):
        mock_fix_crushmap.return_value = True
        # fsid not reachable yet (ok=False): no ceph uuid is discovered
        mock_fsid.return_value = (mock.MagicMock(ok=False), None)
        self.service.start()
        self.service._init_ceph_cluster_info()
    mock_fsid.assert_called()
    self.assertIsNone(self.service._ceph.cluster_ceph_uuid)
    self.assertIsNotNone(self.service._ceph.cluster_db_uuid)

    # Make sure default storage tier is present
    tier_list = self.get_json('/storage_tiers', expect_errors=False)
    self.assertEqual(
        constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
        tier_list['storage_tiers'][0]['name'])
    self.assertEqual(constants.SB_TIER_STATUS_DEFINED,
                     tier_list['storage_tiers'][0]['status'])

    # save the current values
    saved_cluster_db_uuid = self.service._ceph.cluster_db_uuid

    # Add host
    cluster_uuid = uuidutils.generate_uuid()
    with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
        # fsid now reports a live cluster uuid (ok=True)
        mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
        self.service._ceph.update_ceph_cluster(storage_0)
    self.assertIsNotNone(self.service._ceph.cluster_ceph_uuid)
    self.assertIsNotNone(self.service._ceph.cluster_db_uuid)
    # the DB cluster record must survive the ceph uuid discovery
    self.assertEqual(saved_cluster_db_uuid,
                     self.service._ceph.cluster_db_uuid)
    # self.assertEqual(self.service._ceph._cluster_ceph_uuid,
    #                  self.service._ceph._cluster_db_uuid)

    # make sure the host addition produces the correct peer
    ihost_0 = self.dbapi.ihost_get(storage_0.id)
    self.assertEqual(storage_0.id, ihost_0.id)
    peer = self.dbapi.peer_get(ihost_0.peer_id)
    self.assertEqual(peer.name, 'group-0')
    self.assertEqual(peer.hosts, [storage_0.hostname])

    # Add the default ceph backend
    values = {
        'backend': constants.SB_TYPE_CEPH,
        'capabilities': {'test_bparam3': 'one',
                         'test_cparam3': 'two',
                         'test_gparam3': 'three',
                         'test_sparam1': 'four'},
        'services': "%s,%s" % (constants.SB_SVC_CINDER,
                               constants.SB_SVC_GLANCE),
        'confirmed': True
    }
    with nested(
            mock.patch.object(StorageBackendConfig,
                              'get_ceph_mon_ip_addresses')) as (
                mock_ceph_mon):
        response = self.post_json('/storage_backend', values,
                                  expect_errors=False)
    self.assertEqual(http_client.OK, response.status_int)
    self.assertEqual('ceph',  # Expected
                     self.get_json('/storage_backend/%s/' %
                                   response.json['uuid'])['backend'])  # Result

    # update the DB to make sure that the backend set to be configured
    self.dbapi.storage_backend_update(response.json['uuid'],
                                      {'state':
                                       constants.SB_STATE_CONFIGURED})

    # Make sure default storage tier is in use
    tier_list = self.get_json('/storage_tiers', expect_errors=False)
    self.assertEqual(
        constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
        tier_list['storage_tiers'][0]['name'])
    self.assertEqual(constants.SB_TIER_STATUS_IN_USE,
                     tier_list['storage_tiers'][0]['status'])
    default_tier_uuid = tier_list['storage_tiers'][0]['uuid']

    # add a stor
    values = {'ihost_uuid': storage_0.uuid,
              'idisk_uuid': disk_0.uuid}
    with nested(
            mock.patch.object(ceph_utils.CephApiOperator,
                              'get_monitors_status'),
            mock.patch.object(StorageBackendConfig,
                              'has_backend_configured'),
            mock.patch.object(rpcapi.ConductorAPI,
                              'configure_osd_istor')) as (
                mock_mon_status, mock_backend_configured, mock_osd):

        # conductor RPC stub: pretend osd 0 was assigned to the stor
        def fake_configure_osd_istor(context, istor_obj):
            istor_obj['osdid'] = 0
            return istor_obj

        mock_mon_status.return_value = [
            3, 2, ['controller-0', 'controller-1', 'storage-0']]
        mock_osd.side_effect = fake_configure_osd_istor

        response = self.post_json('/istors', values, expect_errors=True)
    self.assertEqual(http_client.OK, response.status_int)
    # a stor created with a single tier lands on the default tier
    self.assertEqual(default_tier_uuid,
                     self.get_json('/istors/%s/' %
                                   response.json['uuid'])['tier_uuid'])  # Result

    # Verify the tier state is still in-use
    tier_list = self.get_json('/storage_tiers', expect_errors=False)
    self.assertEqual(
        constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
        tier_list['storage_tiers'][0]['name'])
    self.assertEqual(constants.SB_TIER_STATUS_IN_USE,
                     tier_list['storage_tiers'][0]['status'])

    # Create a second storage tier without a cluster
    values = {}
    response = self.post_json('/storage_tiers', values, expect_errors=True)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['error_message'])
    self.assertIn('No cluster information was provided for tier creation.',
                  response.json['error_message'])

    # Create a second storage tier without a name
    values = {'cluster_uuid': saved_cluster_db_uuid}
    response = self.post_json('/storage_tiers', values, expect_errors=True)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['error_message'])
    # the name defaults to the default tier name, which already exists
    self.assertIn('Storage tier (%s) already present' %
                  constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
                  response.json['error_message'])

    # Create a second storage tier
    values = {'cluster_uuid': saved_cluster_db_uuid,
              'name': 'gold'}
    with mock.patch.object(ceph_utils.CephApiOperator,
                           'crushmap_tiers_add'):
        response = self.post_json('/storage_tiers', values,
                                  expect_errors=True)
    self.assertEqual(http_client.OK, response.status_int)
    confirm = self.get_json('/storage_tiers/%s/' % response.json['uuid'])
    self.assertEqual(confirm['uuid'], response.json['uuid'])
    self.assertEqual(confirm['name'], 'gold')
    self.assertEqual(confirm['type'], constants.SB_TIER_TYPE_CEPH)
    self.assertEqual(confirm['status'], constants.SB_TIER_STATUS_DEFINED)
    self.assertEqual(confirm['backend_uuid'], None)
    self.assertEqual(confirm['cluster_uuid'], saved_cluster_db_uuid)
    self.assertEqual(confirm['stors'], [])
    self.assertEqual(confirm['capabilities'], {})
    saved_tier_uuid = response.json['uuid']

    # add a stor without specifying a tier: rejected now that two tiers exist
    values = {'ihost_uuid': storage_0.uuid,
              'idisk_uuid': disk_1.uuid}
    with nested(
            mock.patch.object(ceph_utils.CephApiOperator,
                              'get_monitors_status'),
            mock.patch.object(StorageBackendConfig,
                              'has_backend_configured'),
            mock.patch.object(rpcapi.ConductorAPI,
                              'configure_osd_istor')) as (
                mock_mon_status, mock_backend_configured, mock_osd):

        def fake_configure_osd_istor_1(context, istor_obj):
            istor_obj['osdid'] = 1
            return istor_obj

        mock_mon_status.return_value = [
            3, 2, ['controller-0', 'controller-1', 'storage-0']]
        mock_osd.side_effect = fake_configure_osd_istor_1

        response = self.post_json('/istors', values, expect_errors=True)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['error_message'])
    self.assertIn('Multiple storage tiers are present. '
                  'A tier is required for stor creation.',
                  response.json['error_message'])

    # add a stor specifying a tier
    values = {'ihost_uuid': storage_0.uuid,
              'idisk_uuid': disk_1.uuid,
              'tier_uuid': saved_tier_uuid}
    with nested(
            mock.patch.object(ceph_utils.CephApiOperator,
                              'get_monitors_status'),
            mock.patch.object(StorageBackendConfig,
                              'has_backend_configured'),
            mock.patch.object(rpcapi.ConductorAPI,
                              'configure_osd_istor')) as (
                mock_mon_status, mock_backend_configured, mock_osd):

        def fake_configure_osd_istor_2(context, istor_obj):
            istor_obj['osdid'] = 1
            return istor_obj

        mock_mon_status.return_value = [
            3, 2, ['controller-0', 'controller-1', 'storage-0']]
        mock_osd.side_effect = fake_configure_osd_istor_2

        response = self.post_json('/istors', values, expect_errors=True)
    self.assertEqual(http_client.OK, response.status_int)
    self.assertEqual(saved_tier_uuid,
                     self.get_json('/istors/%s/' %
                                   response.json['uuid'])['tier_uuid'])  # Result

    # Verify the tier state has changed
    tier_list = self.get_json('/storage_tiers', expect_errors=False)
    self.assertEqual('gold', tier_list['storage_tiers'][1]['name'])
    self.assertEqual(constants.SB_TIER_STATUS_IN_USE,
                     tier_list['storage_tiers'][1]['status'])

    # validate the cluster view
    cluster_list = self.get_json('/clusters', expect_errors=False)
    self.assertEqual('ceph_cluster', cluster_list['clusters'][0]['name'])
    response = self.get_json('/clusters/%s' %
                             cluster_list['clusters'][0]['uuid'],
                             expect_errors=False)
    self.assertEqual(
        constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
        response['tiers'][0]['name'])
    self.assertEqual('gold', response['tiers'][1]['name'])

    # validate the tier view
    tier_list = self.get_json('/storage_tiers', expect_errors=False)
    self.assertEqual(
        constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
        tier_list['storage_tiers'][0]['name'])
    self.assertEqual('gold', tier_list['storage_tiers'][1]['name'])
    response = self.get_json('/storage_tiers/%s' %
                             tier_list['storage_tiers'][0]['uuid'],
                             expect_errors=False)
    self.assertEqual(
        constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
        response['name'])
    # osd 0 is on the default tier, osd 1 on 'gold'
    self.assertEqual([0], response['stors'])
    response = self.get_json('/storage_tiers/%s' %
                             tier_list['storage_tiers'][1]['uuid'],
                             expect_errors=False)
    self.assertEqual('gold', response['name'])
    self.assertEqual([1], response['stors'])

    # Add the ceph backend for the new tier without specifying a backend name
    values = {
        'backend': constants.SB_TYPE_CEPH,
        'capabilities': {'test_bparam3': 'foo'},
        'confirmed': True
    }
    with nested(
            mock.patch.object(StorageBackendConfig,
                              'get_ceph_mon_ip_addresses')) as (
                mock_ceph_mon):
        response = self.post_json('/storage_ceph', values,
                                  expect_errors=True)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['error_message'])
    self.assertIn('Initial (%s) backend was previously created. Use '
                  'the modify API for further provisioning' %
                  constants.SB_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
                  response.json['error_message'])

    # Add the ceph backend for the new tier without specifying the tier
    values = {
        'backend': constants.SB_TYPE_CEPH,
        'capabilities': {'test_bparam3': 'foo'},
        'name': 'ceph-gold',
        'confirmed': True
    }
    with nested(
            mock.patch.object(StorageBackendConfig,
                              'get_ceph_mon_ip_addresses')) as (
                mock_ceph_mon):
        response = self.post_json('/storage_ceph', values,
                                  expect_errors=True)
    self.assertEqual(http_client.BAD_REQUEST, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['error_message'])
    self.assertIn('No tier specified for this backend.',
                  response.json['error_message'])

    # Add the ceph backend for the new tier
    values = {
        'backend': constants.SB_TYPE_CEPH,
        'capabilities': {'test_bparam3': 'one',
                         'test_cparam3': 'two'},
        'services': constants.SB_SVC_CINDER,
        'name': 'ceph-gold',
        'tier_uuid': saved_tier_uuid,
        'confirmed': True
    }
    with nested(
            mock.patch.object(StorageBackendConfig,
                              'get_ceph_mon_ip_addresses'),
            mock.patch.object(StorageBackendConfig,
                              'get_ceph_tier_size')) as (
                mock_ceph_mon, mock_space):
        mock_space.return_value = 0
        response = self.post_json('/storage_ceph', values,
                                  expect_errors=True)
    self.assertEqual(http_client.OK, response.status_int)
    self.assertEqual('ceph-gold',
                     self.get_json('/storage_backend/%s/' %
                                   response.json['uuid'])['name'])  # Result

    # validate the backend view
    backend_list = self.get_json('/storage_backend', expect_errors=False)
    self.assertEqual(http_client.OK, response.status_int)
    self.assertEqual(
        constants.SB_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
        backend_list['storage_backends'][0]['name'])
    self.assertEqual('ceph-gold',
                     backend_list['storage_backends'][1]['name'])
def setUp(self):
    """Seed a second test disk (/dev/sdb) on the test host before each test."""
    super(ApiPVPostTestSuiteMixin, self).setUp()
    by_path = '/dev/disk/by-path/pci-0000:00:0d.0-ata-2.0'
    self.disk_1 = dbutils.create_test_idisk(
        device_node='/dev/sdb',
        device_path=by_path,
        forihostid=self.host.id)