def test_get_log_levels(self):
    service = objects.Service(self.context, host='host1')
    self._test_rpc_api('get_log_levels',
                       rpc_method='call',
                       server=service.host,
                       service=service,
                       log_request='log_request',
                       version='3.12')
def test_destroy(self, service_destroy):
    db_service = fake_service.fake_db_service()
    service = objects.Service._from_db_object(self.context,
                                              objects.Service(), db_service)
    with mock.patch.object(service._context, 'elevated') as elevated_ctx:
        service.destroy()
        service_destroy.assert_called_once_with(elevated_ctx(), 123)
def test_create(self, service_create):
    db_service = fake_service.fake_db_service()
    service_create.return_value = db_service
    service = objects.Service(context=self.context)
    service.create()
    self.assertEqual(db_service['id'], service.id)
    service_create.assert_called_once_with(self.context, {'uuid': mock.ANY})
def test_save(self, service_update):
    db_service = fake_service.fake_db_service()
    service = objects.Service._from_db_object(
        self.context, objects.Service(), db_service)
    service.topic = 'foobar'
    service.save()
    service_update.assert_called_once_with(self.context, service.id,
                                           {'topic': 'foobar'})
def create_service(ctxt, binary='cinder-volume', host='host@backend',
                   topic='topic', disabled=False,
                   availability_zone='cinder', **kwargs):
    kwargs.update(binary=binary, host=host, topic=topic, disabled=disabled,
                  availability_zone=availability_zone)
    svc = objects.Service(ctxt, **kwargs)
    svc.create()
    return svc
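# A minimal usage sketch for the helper above; the example function and the
# admin-context call are assumptions, not part of the original tests.
def _example_create_service_usage():
    ctxt = context.get_admin_context()
    # Per-call overrides replace the defaults; extra kwargs flow through
    # to the Service object.
    svc = create_service(ctxt, binary='cinder-scheduler', disabled=True)
    assert svc.id is not None  # create() assigned a database id
    return svc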
def test_cleanup_destination_volume(self):
    service = objects.Service(id=1, host='hostname', cluster_name=None,
                              binary='cinder-volume')
    result = self.manager._cleanup_destination(None, service)
    expected = (self.manager.volume_api.do_cleanup, service, service.host)
    self.assertEqual(expected, result)
def test_failover_api_fail_multiple_results_not_updated(self,
                                                        failover_mock):
    """Fail if none of the services could be updated."""
    rep_field = fields.ReplicationStatus
    cluster_name = 'mycluster@backend1'
    cluster = objects.Cluster(self.context, name=cluster_name,
                              replication_status=rep_field.ENABLED,
                              binary=constants.VOLUME_BINARY)
    cluster.create()
    down_time = timeutils.datetime.datetime(1970, 1, 1)
    services = [
        # This service is down
        objects.Service(self.context, host='host1@backend1',
                        cluster_name=cluster_name,
                        replication_status=rep_field.ENABLED,
                        created_at=down_time, updated_at=down_time,
                        modified_at=down_time,
                        binary=constants.VOLUME_BINARY),
        # This service does not have the right replication status
        objects.Service(self.context, host='host2@backend1',
                        cluster_name=cluster_name,
                        replication_status=rep_field.ERROR,
                        binary=constants.VOLUME_BINARY),
    ]
    services[0].create()
    services[1].create()

    self.assertRaises(exception.InvalidInput,
                      self.volume_api.failover, self.context, None,
                      cluster_name, mock.sentinel.secondary_id)

    for service in services:
        svc = objects.Service.get_by_id(self.context, service.id)
        self.assertEqual(service.replication_status,
                         svc.replication_status)

    cluster.refresh()
    self.assertEqual(rep_field.ENABLED, cluster.replication_status)

    failover_mock.assert_not_called()
def test_failover(self, version):
    self.can_send_version_mock.side_effect = lambda x: x == version
    service = objects.Service(self.context, host='fake_host',
                              cluster_name=None)
    expected_method = 'failover' if version == '3.8' else 'failover_host'
    self._test_rpc_api('failover',
                       rpc_method='cast',
                       expected_method=expected_method,
                       server='fake_host',
                       service=service,
                       secondary_backend_id='fake_backend',
                       version=version)
def test_cleanup_destination_volume_cluster_cache_hit(self):
    cluster = objects.Cluster(id=1, name='mycluster',
                              binary=constants.VOLUME_BINARY)
    service = objects.Service(id=2, host='hostname',
                              cluster_name=cluster.name,
                              binary=constants.VOLUME_BINARY)
    cluster_cache = {'cinder-volume': {'mycluster': cluster}}
    result = self.manager._cleanup_destination(cluster_cache, service)
    expected = (self.manager.volume_api.do_cleanup, cluster, cluster.name)
    self.assertEqual(expected, result)
def test_failover_completed(self, cctxt_mock):
    service = objects.Service(self.context, host='fake_host',
                              cluster_name='cluster_name')
    self._test_rpc_api('failover_completed',
                       rpc_method='cast',
                       fanout=True,
                       server='fake_host',
                       service=service,
                       updates=mock.sentinel.updates)
def _create_service_ref(self, context):
    zone = CONF.storage_availability_zone
    kwargs = {'host': self.host,
              'binary': self.binary,
              'topic': self.topic,
              'report_count': 0,
              'availability_zone': zone}
    service_ref = objects.Service(context=context, **kwargs)
    service_ref.create()
    self.service_id = service_ref.id
def create_service(ctxt, values=None):
    values = values or {}
    v = default_service_values()
    v.update(values)
    service = objects.Service(ctxt, **v)
    service.create()

    # We need to read the contents from the DB if we have set updated_at
    # or created_at fields
    if 'updated_at' in values or 'created_at' in values:
        service = db.service_get(ctxt, service.id)

    return service
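# For reference, a plausible shape for the default_service_values() helper
# the function above relies on; the exact fields are an assumption inferred
# from the Service attributes exercised in these tests.
def default_service_values():
    return {'host': 'fake_host',
            'binary': 'fake_binary',
            'topic': 'fake_topic',
            'report_count': 3,
            'disabled': False}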
def test_get_manageable_detail(self, clustered, is_detail, view_method,
                               get_service_mock, get_cctxt_mock):
    if clustered:
        host = None
        cluster_name = 'mycluster'
        version = mv.MANAGE_EXISTING_CLUSTER
        kwargs = {'cluster': cluster_name}
    else:
        host = 'fakehost'
        cluster_name = None
        version = mv.MANAGE_EXISTING_LIST
        kwargs = {}
    service = objects.Service(disabled=False, host='fakehost',
                              cluster_name=cluster_name)
    get_service_mock.return_value = service
    snaps = [mock.sentinel.snap1, mock.sentinel.snap2]
    get_cctxt_mock.return_value.call.return_value = snaps

    view_data = {'manageable-snapshots': [{'vol': 'mock.sentinel.snap1'},
                                          {'vol': 'mock.sentinel.snap2'}]}
    view_path = ('cinder.api.views.manageable_snapshots.ViewBuilder.' +
                 view_method)
    with mock.patch(view_path, return_value=view_data) as detail_view_mock:
        res = self._get_resp_get(host, is_detail, False, version=version,
                                 **kwargs)

    self.assertEqual(http_client.OK, res.status_int)
    get_cctxt_mock.assert_called_once_with(service.service_topic_queue,
                                           version=('3.10', '3.0'))
    get_cctxt_mock.return_value.call.assert_called_once_with(
        mock.ANY, 'get_manageable_snapshots', marker=None,
        limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'],
        sort_dirs=['desc'], want_objects=True)
    detail_view_mock.assert_called_once_with(mock.ANY, snaps, len(snaps))
    get_service_mock.assert_called_once_with(
        mock.ANY, None, host=host, binary=constants.VOLUME_BINARY,
        cluster_name=cluster_name)
def test_cleanup_destination_volume_cluster_cache_miss(self, get_mock):
    cluster = objects.Cluster(id=1, name='mycluster',
                              binary=constants.VOLUME_BINARY)
    service = objects.Service(self.context, id=2, host='hostname',
                              cluster_name=cluster.name,
                              binary=constants.VOLUME_BINARY)
    get_mock.return_value = cluster
    cluster_cache = collections.defaultdict(dict)
    result = self.manager._cleanup_destination(cluster_cache, service)
    expected = (self.manager.volume_api.do_cleanup, cluster, cluster.name)
    self.assertEqual(expected, result)
def test_destroy(self, service_destroy, utcnow_mock):
    service_destroy.return_value = {
        'deleted': True,
        'deleted_at': utcnow_mock.return_value}
    db_service = fake_service.fake_db_service()
    service = objects.Service._from_db_object(
        self.context, objects.Service(), db_service)
    with mock.patch.object(service._context, 'elevated') as elevated_ctx:
        service.destroy()
        service_destroy.assert_called_once_with(elevated_ctx(), 123)
        self.assertTrue(service.deleted)
        self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC),
                         service.deleted_at)
def test_failover_api_success_multiple_results_not_updated(self,
                                                           failover_mock):
    """Succeed in failing over even if a service is not updated."""
    rep_field = fields.ReplicationStatus
    cluster_name = 'mycluster@backend1'
    cluster = objects.Cluster(self.context, name=cluster_name,
                              replication_status=rep_field.ENABLED,
                              binary=constants.VOLUME_BINARY)
    cluster.create()
    services = [
        objects.Service(self.context, host='host1@backend1',
                        cluster_name=cluster_name,
                        replication_status=rep_field.ENABLED,
                        binary=constants.VOLUME_BINARY),
        objects.Service(self.context, host='host2@backend1',
                        cluster_name=cluster_name,
                        replication_status=rep_field.ERROR,
                        binary=constants.VOLUME_BINARY),
    ]
    services[0].create()
    services[1].create()

    self.volume_api.failover(self.context, None, cluster_name,
                             mock.sentinel.secondary_id)

    for service in services[:1] + [cluster]:
        service.refresh()
        self.assertEqual(rep_field.FAILING_OVER,
                         service.replication_status)

    services[1].refresh()
    self.assertEqual(rep_field.ERROR, services[1].replication_status)

    failover_mock.assert_called_once_with(self.context, mock.ANY,
                                          mock.sentinel.secondary_id)
    self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)
def test_work_cleanup(self, get_mock, vol_clean_mock, sch_clean_mock):
    args = dict(service_id=1, cluster_name='cluster_name', host='host',
                binary=constants.VOLUME_BINARY, is_up=False, disabled=True,
                resource_id=fake.VOLUME_ID, resource_type='Volume')

    cluster = objects.Cluster(id=1, name=args['cluster_name'],
                              binary=constants.VOLUME_BINARY)
    services = [
        objects.Service(self.context, id=2, host='hostname',
                        cluster_name=cluster.name,
                        binary=constants.VOLUME_BINARY,
                        cluster=cluster),
        objects.Service(self.context, id=3, host='hostname',
                        cluster_name=None,
                        binary=constants.SCHEDULER_BINARY),
        objects.Service(self.context, id=4, host='hostname',
                        cluster_name=None,
                        binary=constants.VOLUME_BINARY),
    ]
    get_mock.return_value = services

    cleanup_request = objects.CleanupRequest(self.context, **args)
    res = self.manager.work_cleanup(self.context, cleanup_request)
    self.assertEqual((services[:2], services[2:]), res)
    self.assertEqual(1, vol_clean_mock.call_count)
    self.assertEqual(1, sch_clean_mock.call_count)
def _create_service_ref(self, context):
    zone = CONF.storage_availability_zone
    kwargs = {
        'host': self.host,
        'binary': self.binary,
        'topic': self.topic,
        'report_count': 0,
        'availability_zone': zone,
        'rpc_current_version': self.manager.RPC_API_VERSION,
        'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
    }
    service_ref = objects.Service(context=context, **kwargs)
    service_ref.create()
    self.service_id = service_ref.id
def test_failover_completed(self, completed_mock):
    rep_field = fields.ReplicationStatus
    svc = objects.Service(self.context, host=self.volume.host,
                          binary=constants.VOLUME_BINARY,
                          replication_status=rep_field.ENABLED)
    svc.create()
    self.volume.failover_completed(
        self.context,
        {'active_backend_id': 'secondary',
         'replication_status': rep_field.FAILED_OVER})
    service = objects.Service.get_by_id(self.context, svc.id)
    self.assertEqual('secondary', service.active_backend_id)
    self.assertEqual('failed-over', service.replication_status)
    completed_mock.assert_called_once_with(self.context, 'secondary')
def test_get_manageable_detail(self, clustered, is_detail, view_method,
                               get_service_mock, get_cctxt_mock):
    if clustered:
        host = None
        cluster_name = 'mycluster'
        version = '3.17'
        kwargs = {'cluster': cluster_name}
    else:
        host = 'fakehost'
        cluster_name = None
        version = '3.8'
        kwargs = {}
    service = objects.Service(disabled=False, host='fakehost',
                              cluster_name=cluster_name)
    get_service_mock.return_value = service
    volumes = [mock.sentinel.volume1, mock.sentinel.volume2]
    get_cctxt_mock.return_value.call.return_value = volumes

    view_data = {'manageable-volumes': [{'vol': str(v)} for v in volumes]}
    view_path = ('cinder.api.views.manageable_volumes.ViewBuilder.' +
                 view_method)
    with mock.patch(view_path, return_value=view_data) as detail_view_mock:
        res = self._get_resp_get(host, is_detail, False, version=version,
                                 **kwargs)

    self.assertEqual(http_client.OK, res.status_int)
    get_cctxt_mock.assert_called_once_with(service.service_topic_queue,
                                           version=('3.10', '3.0'))
    get_cctxt_mock.return_value.call.assert_called_once_with(
        mock.ANY, 'get_manageable_volumes', marker=None,
        limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'],
        sort_dirs=['desc'], want_objects=True)
    detail_view_mock.assert_called_once_with(mock.ANY, volumes,
                                             len(volumes))
    get_service_mock.assert_called_once_with(mock.ANY, None, host=host,
                                             binary='cinder-volume',
                                             cluster_name=cluster_name)
def test_failover_manager(self, cluster, get_vols_mock, finish_mock):
    """Test manager's failover method for clustered and not clustered."""
    rep_field = fields.ReplicationStatus
    svc = objects.Service(self.context, host=self.volume.host,
                          binary=constants.VOLUME_BINARY,
                          cluster_name=cluster,
                          replication_status=rep_field.ENABLED)
    svc.create()
    vol = objects.Volume(self.context, host=self.volume.host)
    vol.create()
    get_vols_mock.return_value = [vol]

    with mock.patch.object(self.volume, 'driver') as driver:
        called, not_called = driver.failover_host, driver.failover
        if cluster:
            called, not_called = not_called, called
        called.return_value = ('secondary',
                               [{'volume_id': vol.id,
                                 'updates': {'status': 'error'}}],
                               [])
        self.volume.failover(self.context,
                             secondary_backend_id='secondary')

    not_called.assert_not_called()
    called.assert_called_once_with(self.context, [vol],
                                   secondary_id='secondary', groups=[])

    expected_update = {'replication_status': rep_field.FAILED_OVER,
                       'active_backend_id': 'secondary',
                       'disabled': True,
                       'disabled_reason': 'failed-over'}
    finish_mock.assert_called_once_with(self.context, svc, expected_update)

    volume = objects.Volume.get_by_id(self.context, vol.id)
    self.assertEqual('error', volume.status)
def _create_service_ref(self, context, rpc_version=None):
    kwargs = {
        'host': self.host,
        'binary': self.binary,
        'topic': self.topic,
        'report_count': 0,
        'availability_zone': self.availability_zone,
        'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
        'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
    }
    kwargs['cluster_name'] = self.cluster
    service_ref = objects.Service(context=context, **kwargs)
    service_ref.create()
    Service.service_id = service_ref.id
    self._ensure_cluster_exists(context, service_ref)
    # If we have updated the service_ref with replication data from
    # the cluster it will be saved.
    service_ref.save()
def _create_service_ref(self, context, rpc_version=None):
    zone = CONF.storage_availability_zone
    kwargs = {
        'host': self.host,
        'binary': self.binary,
        'topic': self.topic,
        'report_count': 0,
        'availability_zone': zone,
        'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
        'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
    }
    # TODO(geguileo): In O unconditionally set cluster_name like above.
    # If we are upgrading we have to ignore the cluster value.
    if not self.is_upgrading_to_n:
        kwargs['cluster_name'] = self.cluster
    service_ref = objects.Service(context=context, **kwargs)
    service_ref.create()
    Service.service_id = service_ref.id
    # TODO(geguileo): In O unconditionally ensure that the cluster exists.
    if not self.is_upgrading_to_n:
        self._ensure_cluster_exists(context)
def test_get_manageable_snapshots(self, host, cluster_name, version,
                                  can_send_version):
    can_send_version.side_effect = lambda x: x == version
    service = objects.Service(self.context, host=host,
                              cluster_name=cluster_name)
    expected_kwargs_diff = (
        {'want_objects': True} if version == '3.10' else {})
    self._test_rpc_api('get_manageable_snapshots',
                       rpc_method='call',
                       service=service,
                       server=cluster_name or host,
                       marker=5,
                       limit=20,
                       offset=5,
                       sort_keys='fake_keys',
                       sort_dirs='fake_dirs',
                       expected_kwargs_diff=expected_kwargs_diff,
                       version=version)
    can_send_version.assert_has_calls([mock.call('3.10')])
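# A sketch of the client-side pattern the RPC-API tests above exercise:
# pin the highest mutually supported version and only send newer arguments
# when the peer understands them. Names such as _get_cctxt and the exact
# message arguments are assumptions modeled on the test expectations, not
# a verbatim copy of the rpcapi module.
def get_manageable_snapshots(self, ctxt, service, marker, limit, offset,
                             sort_keys, sort_dirs, want_objects=False):
    cctxt = self._get_cctxt(service.service_topic_queue,
                            version=('3.10', '3.0'))
    msg_args = {'marker': marker, 'limit': limit, 'offset': offset,
                'sort_keys': sort_keys, 'sort_dirs': sort_dirs}
    # Only pass the newer argument when the peer supports 3.10.
    if self.client.can_send_version('3.10'):
        msg_args['want_objects'] = want_objects
    return cctxt.call(ctxt, 'get_manageable_snapshots', **msg_args)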
import webob

from cinder.api import microversions as mv
from cinder.api.v3 import router as router_v3
from cinder.api.v3 import workers
from cinder.common import constants
from cinder import context
from cinder import objects
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test


SERVICES = (
    [objects.Service(id=1, host='host1', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster'),
     objects.Service(id=2, host='host2', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster')],
    [objects.Service(id=3, host='host3', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster'),
     objects.Service(id=4, host='host4', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster')],
)
from oslo_serialization import jsonutils
from six.moves import http_client
import webob

from cinder.api import microversions as mv
from cinder.api.v3 import router as router_v3
from cinder.api.v3 import workers
from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake


SERVICES = (
    [objects.Service(id=1, host='host1', binary='cinder-volume',
                     cluster_name='mycluster'),
     objects.Service(id=2, host='host2', binary='cinder-volume',
                     cluster_name='mycluster')],
    [objects.Service(id=3, host='host3', binary='cinder-volume',
                     cluster_name='mycluster'),
     objects.Service(id=4, host='host4', binary='cinder-volume',
                     cluster_name='mycluster')],
)


def app():
    # no auth, just let environ['cinder.context'] pass through
    api = router_v3.APIRouter()
    mapper = fakes.urlmap.URLMap()
    mapper['/v3'] = api
    return mapper
def fake_service_obj(context, **updates):
    return objects.Service._from_db_object(context, objects.Service(),
                                           fake_db_service(**updates))
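# A usage sketch for the fixture above (this example function is an
# assumption, not part of the original module): it builds a Service OVO
# from a fake DB row, with field overrides, without touching the database.
def _example_fake_service_obj_usage(ctxt):
    service = fake_service_obj(ctxt, host='host2@backend',
                               binary='cinder-backup', disabled=True)
    assert service.disabled  # overrides land on the resulting object
    return service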
def test__get_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock,
                  get_log_mock, get_all_mock):
    get_log_mock.return_value = mock.sentinel.api_levels
    backup_rpc_mock.return_value = [
        objects.LogLevel(prefix='p1', level='l1'),
        objects.LogLevel(prefix='p2', level='l2')]
    vol_rpc_mock.return_value = [
        objects.LogLevel(prefix='p3', level='l3'),
        objects.LogLevel(prefix='p4', level='l4')]
    sch_rpc_mock.return_value = [
        objects.LogLevel(prefix='p5', level='l5'),
        objects.LogLevel(prefix='p6', level='l6')]

    services = [
        objects.Service(self.context, binary=constants.SCHEDULER_BINARY,
                        host='host'),
        objects.Service(self.context, binary=constants.VOLUME_BINARY,
                        host='host@backend#pool'),
        objects.Service(self.context, binary=constants.BACKUP_BINARY,
                        host='host'),
    ]
    get_all_mock.return_value = services
    body = {'binary': '*', 'prefix': 'eventlet.'}

    log_level = objects.LogLevel(prefix=body['prefix'])
    with mock.patch('cinder.objects.LogLevel') as log_level_mock:
        log_level_mock.return_value = log_level
        res = self.controller._get_log(mock.sentinel.context, body)
    log_level_mock.assert_called_once_with(mock.sentinel.context,
                                           prefix=body['prefix'])

    expected = {'log_levels': [
        {'binary': 'cinder-api',
         'host': CONF.host,
         'levels': mock.sentinel.api_levels},
        {'binary': 'cinder-scheduler',
         'host': 'host',
         'levels': {'p5': 'l5', 'p6': 'l6'}},
        {'binary': constants.VOLUME_BINARY,
         'host': 'host@backend#pool',
         'levels': {'p3': 'l3', 'p4': 'l4'}},
        {'binary': 'cinder-backup',
         'host': 'host',
         'levels': {'p1': 'l1', 'p2': 'l2'}},
    ]}
    self.assertDictEqual(expected, res)

    get_log_mock.assert_called_once_with(body['prefix'])
    sch_rpc_mock.assert_called_once_with(mock.sentinel.context,
                                         services[0], log_level)
    vol_rpc_mock.assert_called_once_with(mock.sentinel.context,
                                         services[1], log_level)
    backup_rpc_mock.assert_called_once_with(mock.sentinel.context,
                                            services[2], log_level)
def test_get_all_host_states(self, _mock_service_is_up,
                             _mock_service_get_all):
    context = 'fake_context'
    topic = constants.VOLUME_TOPIC

    services = [
        dict(id=1, host='host1', topic='volume', disabled=False,
             availability_zone='zone1', updated_at=timeutils.utcnow(),
             binary=None, deleted=False, created_at=None,
             modified_at=None, report_count=0, deleted_at=None,
             disabled_reason=None),
        dict(id=2, host='host2', topic='volume', disabled=False,
             availability_zone='zone1', updated_at=timeutils.utcnow(),
             binary=None, deleted=False, created_at=None,
             modified_at=None, report_count=0, deleted_at=None,
             disabled_reason=None),
        dict(id=3, host='host3', topic='volume', disabled=False,
             availability_zone='zone2', updated_at=timeutils.utcnow(),
             binary=None, deleted=False, created_at=None,
             modified_at=None, report_count=0, deleted_at=None,
             disabled_reason=None),
        dict(id=4, host='host4', topic='volume', disabled=False,
             availability_zone='zone3', updated_at=timeutils.utcnow(),
             binary=None, deleted=False, created_at=None,
             modified_at=None, report_count=0, deleted_at=None,
             disabled_reason=None),
    ]

    service_objs = []
    for db_service in services:
        service_obj = objects.Service()
        service_objs.append(objects.Service._from_db_object(context,
                                                            service_obj,
                                                            db_service))

    service_states = {
        'host1': dict(volume_backend_name='AAA',
                      total_capacity_gb=512, free_capacity_gb=200,
                      timestamp=None, reserved_percentage=0,
                      provisioned_capacity_gb=312),
        'host2': dict(volume_backend_name='BBB',
                      total_capacity_gb=256, free_capacity_gb=100,
                      timestamp=None, reserved_percentage=0,
                      provisioned_capacity_gb=156),
        'host3': dict(volume_backend_name='CCC',
                      total_capacity_gb=10000, free_capacity_gb=700,
                      timestamp=None, reserved_percentage=0,
                      provisioned_capacity_gb=9300),
    }

    # First test: service_is_up is always True, host4 has no capabilities
    self.host_manager.service_states = service_states
    _mock_service_get_all.return_value = services
    _mock_service_is_up.return_value = True
    _mock_warning = mock.Mock()
    host_manager.LOG.warning = _mock_warning

    # Get all states
    self.host_manager.get_all_host_states(context)
    _mock_service_get_all.assert_called_with(context,
                                             disabled=False,
                                             topic=topic)
    expected = []
    for service in service_objs:
        expected.append(mock.call(service))
    self.assertEqual(expected, _mock_service_is_up.call_args_list)

    # Get host_state_map and make sure we have the first 3 hosts
    host_state_map = self.host_manager.host_state_map
    self.assertEqual(3, len(host_state_map))
    for i in range(3):
        volume_node = services[i]
        host = volume_node['host']
        test_service.TestService._compare(self, volume_node,
                                          host_state_map[host].service)

    # Second test: Now service_is_up returns False for host3
    _mock_service_is_up.reset_mock()
    _mock_service_is_up.side_effect = [True, True, False, True]
    _mock_service_get_all.reset_mock()
    _mock_warning.reset_mock()

    # Get all states, make sure host 3 is reported as down
    self.host_manager.get_all_host_states(context)
    _mock_service_get_all.assert_called_with(context,
                                             disabled=False,
                                             topic=topic)
    self.assertEqual(expected, _mock_service_is_up.call_args_list)
    self.assertGreater(_mock_warning.call_count, 0)

    # Get host_state_map and make sure we have the first 2 hosts (host3 is
    # down, host4 is missing capabilities)
    host_state_map = self.host_manager.host_state_map
    self.assertEqual(2, len(host_state_map))
    for i in range(2):
        volume_node = services[i]
        host = volume_node['host']
        test_service.TestService._compare(self, volume_node,
                                          host_state_map[host].service)
def test_cleanup_destination_scheduler(self):
    service = objects.Service(id=1, host='hostname',
                              binary='cinder-scheduler')
    result = self.manager._cleanup_destination(None, service)
    expected = (self.manager.sch_api.do_cleanup, None, service.host)
    self.assertEqual(expected, result)
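# For context, a sketch of the _cleanup_destination contract that the four
# cleanup-destination tests in this section pin down. This reconstruction
# is inferred from the asserted (rpc_method, destination, name) tuples and
# is an assumption, not a verbatim copy of the scheduler manager.
def _cleanup_destination(self, cluster_cache, service):
    if service.binary == 'cinder-scheduler':
        # Any scheduler can do the cleanup, so there is no destination.
        return self.sch_api.do_cleanup, None, service.host
    if service.cluster_name:
        # Clustered volume service: resolve the cluster through the cache,
        # falling back to the service's cluster field on a miss.
        cluster = cluster_cache[service.binary].get(service.cluster_name)
        if not cluster:
            cluster = service.cluster
            cluster_cache[service.binary][service.cluster_name] = cluster
        return self.volume_api.do_cleanup, cluster, cluster.name
    # Non-clustered volume service.
    return self.volume_api.do_cleanup, service, service.host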