def test_create_consistencygroup_no_hosts(self):
    """Scheduling a consistency group with no hosts raises NoValidBackend."""
    sched = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project')
    # Build one request spec per volume type in the group.
    spec_list = []
    for type_name in ('Type1', 'Type2'):
        spec_list.append(
            {'volume_properties': {'project_id': 1, 'size': 0},
             'volume_type': {'name': type_name, 'extra_specs': {}}})
    self.assertRaises(exception.NoValidBackend,
                      sched.schedule_create_consistencygroup,
                      ctxt, 'faki-id1', spec_list, {})
def test_schedule_consistencygroup(self, _mock_service_get_all):
    """_schedule_group() finds a host when CG support is in extra specs."""
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    fakes.mock_host_manager_db_calls(_mock_service_get_all)
    extra_specs = {'capabilities:consistencygroup_support': '<is> True'}
    spec_list = [
        {'volume_properties': {'project_id': 1, 'size': 0},
         'volume_type': {'name': name, 'extra_specs': extra_specs}}
        for name in ('Type1', 'Type2')
    ]
    weighed = sched._schedule_group(ctxt, spec_list, {})
    self.assertIsNotNone(weighed.obj)
    self.assertTrue(_mock_service_get_all.called)
def test_create_volume_non_admin(self, _mock_get_all_backend_states):
    """schedule_create_volume uses an admin context for DB actions.

    The scheduler is driven with a plain user context; the patched
    backend-state fetch records the admin flag of the context it is
    actually called with.
    """
    self.was_admin = False

    def record_admin(ctxt):
        # Capture whether the scheduler elevated to admin.
        self.was_admin = ctxt.is_admin
        return {}

    _mock_get_all_backend_states.side_effect = record_admin
    sched = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project')
    spec = objects.RequestSpec.from_primitives(
        {'volume_properties': {'project_id': 1, 'size': 1},
         'volume_type': {'name': 'LVM_iSCSI'},
         'volume_id': fake.VOLUME_ID})
    self.assertRaises(exception.NoValidBackend,
                      sched.schedule_create_volume, ctxt, spec, {})
    self.assertTrue(self.was_admin)
def test_retry_attempt_two(self):
    """A re-scheduled request bumps the retry counter to two."""
    self.flags(scheduler_max_attempts=2)
    sched = fakes.FakeFilterScheduler()
    spec = objects.RequestSpec.from_primitives(
        {'volume_type': {'name': 'LVM_iSCSI'},
         'volume_properties': {'project_id': 1, 'size': 1}})
    # Simulate one prior scheduling attempt.
    filter_properties = {'retry': {'num_attempts': 1}}
    sched._schedule(self.context, spec,
                    filter_properties=filter_properties)
    self.assertEqual(2, filter_properties['retry']['num_attempts'])
def test_schedule_consistencygroup_no_cg_support_in_extra_specs(
        self, _mock_service_get_all_by_topic):
    """_schedule_group() finds a host even without CG-support specs.

    consistencygroup_support is deliberately absent from each volume
    type's extra specs.
    """
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
    spec_list = [
        {'volume_properties': {'project_id': 1, 'size': 0},
         'volume_type': {'name': name, 'extra_specs': {}}}
        for name in ('Type1', 'Type2')
    ]
    weighed = sched._schedule_group(ctxt, spec_list, {})
    self.assertIsNotNone(weighed.obj)
    self.assertTrue(_mock_service_get_all_by_topic.called)
def test_create_volume_update_fault(self, _mock_service_get_all,
                                    _mock_volume_get,
                                    _mock_vol_fault_update,
                                    _mock_vol_update):
    """Scheduling after a DB update records a fault entry for the volume."""
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    fakes.mock_host_manager_db_calls(_mock_service_get_all)
    volume = fake_volume.fake_volume_obj(self.context)
    _mock_volume_get.return_value = volume
    driver.volume_update_db(self.context, volume.id, 'fake_host',
                            'fake_cluster')
    spec = objects.RequestSpec.from_primitives(
        {'volume_type': {'name': 'LVM_iSCSI'},
         'volume_properties': {'project_id': 1, 'size': 1},
         'volume_id': volume.id,
         'snapshot_id': None,
         'image_id': None})
    sched.schedule_create_volume(ctxt, spec, {})
    # An empty message/details pair is written to the fault table.
    _mock_vol_fault_update.assert_called_once_with(
        ctxt, volume.id, {'message': '', 'details': ''})
def _host_passes_filters_setup(self, mock_obj):
    """Return a (scheduler, admin context) pair for host-filter tests."""
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager()
    fakes.mock_host_manager_db_calls(mock_obj)
    admin_ctxt = context.RequestContext('user', 'project', is_admin=True)
    return (sched, admin_ctxt)
def test_add_retry_backend(self):
    """_add_retry_backend() appends the backend to the retry list."""
    filter_properties = {'retry': {'num_attempts': 1, 'backends': []}}
    sched = fakes.FakeFilterScheduler()
    sched._add_retry_backend(filter_properties, "fakehost")
    self.assertListEqual(["fakehost"],
                         filter_properties['retry']['backends'])
def test_add_retry_host(self):
    """_add_retry_host() appends the host to the retry 'hosts' list.

    Mirrors test_add_retry_backend for the legacy host-keyed retry
    info; a single assertListEqual checks both length and contents.
    """
    retry = dict(num_attempts=1, hosts=[])
    filter_properties = dict(retry=retry)
    host = "fakehost"
    sched = fakes.FakeFilterScheduler()
    sched._add_retry_host(filter_properties, host)
    hosts = filter_properties['retry']['hosts']
    self.assertListEqual([host], hosts)
def test_create_volume_no_hosts(self):
    # Ensure empty hosts/child_zones result in NoValidHosts exception.
    # NOTE(review): this file also contains a newer
    # test_create_volume_no_hosts that expects NoValidBackend and wraps
    # the spec in objects.RequestSpec; if both live in the same class,
    # the later definition shadows this one and this copy never runs --
    # confirm and remove the stale version.
    sched = fakes.FakeFilterScheduler()
    fake_context = context.RequestContext('user', 'project')
    request_spec = {'volume_properties': {'project_id': 1, 'size': 1},
                    'volume_type': {'name': 'LVM_iSCSI'},
                    'volume_id': ['fake-id1']}
    self.assertRaises(exception.NoValidHost,
                      sched.schedule_create_volume, fake_context,
                      request_spec, {})
def test_create_volume_no_hosts(self):
    """With no backends available, scheduling raises NoValidBackend."""
    sched = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project')
    spec = objects.RequestSpec.from_primitives(
        {'volume_properties': {'project_id': 1, 'size': 1},
         'volume_type': {'name': 'LVM_iSCSI'},
         'volume_id': fake.VOLUME_ID})
    self.assertRaises(exception.NoValidBackend,
                      sched.schedule_create_volume, ctxt, spec, {})
def test_create_volume_host_same_as_cg(self, _mock_service_get_all):
    """A host on the consistency group's backend is not cleared."""
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager()
    fakes.mock_host_manager_db_calls(_mock_service_get_all)
    ctxt = context.RequestContext('user', 'project')
    spec = {'volume_properties': {'project_id': 1, 'size': 1},
            'volume_type': {'name': 'LVM_iSCSI'},
            'CG_backend': 'host1'}
    weighed = sched._schedule(ctxt, spec, {})
    self.assertEqual('host1#lvm1', weighed.obj.host)
def test_create_volume_no_hosts_invalid_req(self):
    """A request_spec missing 'volume_id' ends in NoValidHost."""
    sched = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project')
    # 'volume_id' is deliberately absent from the spec.
    bad_spec = {'volume_properties': {'project_id': 1, 'size': 1},
                'volume_type': {'name': 'LVM_iSCSI'}}
    self.assertRaises(exception.NoValidHost,
                      sched.schedule_create_volume, ctxt, bad_spec, {})
def test_create_volume_no_volume_type(self):
    # NOTE(review): this file also contains a newer
    # test_create_volume_no_volume_type that expects NoValidBackend and
    # wraps the spec in objects.RequestSpec; if both live in the same
    # class, the later definition shadows this one -- confirm and
    # remove the stale version.
    sched = fakes.FakeFilterScheduler()
    fake_context = context.RequestContext('user', 'project')
    # request_spec is missing 'volume_type'
    request_spec = {'volume_properties': {'project_id': 1, 'size': 1},
                    'volume_id': ['fake-id1']}
    self.assertRaises(exception.InvalidVolumeType,
                      sched.schedule_create_volume, fake_context,
                      request_spec, {})
def test_post_select_populate(self):
    """The selected host is recorded in the retry filter properties."""
    filter_properties = {'retry': {'hosts': [], 'num_attempts': 1}}
    sched = fakes.FakeFilterScheduler()
    state = host_manager.HostState('host')
    state.total_capacity_gb = 1024
    sched._post_select_populate_filter_properties(filter_properties,
                                                  state)
    self.assertEqual('host', filter_properties['retry']['hosts'][0])
    # Host state is left untouched by the population step.
    self.assertEqual(1024, state.total_capacity_gb)
def test_retry_exceeded_max_attempts(self):
    """Exceeding scheduler_max_attempts raises NoValidHost."""
    self.flags(scheduler_max_attempts=2)
    sched = fakes.FakeFilterScheduler()
    spec = {'volume_type': {'name': 'LVM_iSCSI'},
            'volume_properties': {'project_id': 1, 'size': 1}}
    # Already at the configured maximum number of attempts.
    filter_properties = {'retry': {'num_attempts': 2}}
    self.assertRaises(exception.NoValidHost, sched._schedule,
                      self.context, spec,
                      filter_properties=filter_properties)
def test_create_volume_clear_host_different_with_cg(
        self, _mock_service_get_all):
    """Hosts whose backend differs from the CG's backend are cleared."""
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager()
    fakes.mock_host_manager_db_calls(_mock_service_get_all)
    ctxt = context.RequestContext('user', 'project')
    spec = {'volume_properties': {'project_id': 1, 'size': 1},
            'volume_type': {'name': 'LVM_iSCSI'},
            'CG_backend': 'host@lvmdriver'}
    self.assertIsNone(sched._schedule(ctxt, spec, {}))
def test_retry_attempt_one(self):
    """The first scheduling pass initializes the retry counter to one."""
    self.flags(scheduler_max_attempts=2)
    sched = fakes.FakeFilterScheduler()
    spec = {'volume_type': {'name': 'LVM_iSCSI'},
            'volume_properties': {'project_id': 1, 'size': 1}}
    filter_properties = {}
    sched._schedule(self.context, spec,
                    filter_properties=filter_properties)
    self.assertEqual(1, filter_properties['retry']['num_attempts'])
def test_retry_revert_consumed_capacity(self):
    """Re-scheduling reverts capacity consumed on the failed backend."""
    sched = fakes.FakeFilterScheduler()
    spec = objects.RequestSpec.from_primitives(
        {'volume_type': {'name': 'LVM_iSCSI'},
         'volume_properties': {'project_id': 1, 'size': 2}})
    # One failed attempt on 'fake_backend_name' is already recorded.
    filter_properties = {
        'retry': {'num_attempts': 1, 'backends': ['fake_backend_name']}}
    patcher = mock.patch.object(sched.host_manager,
                                'revert_volume_consumed_capacity')
    with patcher as mock_revert:
        sched._schedule(self.context, spec,
                        filter_properties=filter_properties)
        # The 2 GB consumed by the failed attempt must be given back.
        mock_revert.assert_called_once_with('fake_backend_name', 2)
def test_create_volume_no_volume_type(self):
    """A request_spec missing 'volume_type' ends in NoValidBackend."""
    sched = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project')
    # 'volume_type' is deliberately absent from the spec.
    spec = objects.RequestSpec.from_primitives(
        {'volume_properties': {'project_id': 1, 'size': 1},
         'volume_id': fake.VOLUME_ID})
    self.assertRaises(exception.NoValidBackend,
                      sched.schedule_create_volume, ctxt, spec, {})
def test_create_volume_host_different_with_resource_backend(
        self, resource_backend, multibackend_with_pools,
        _mock_service_get_all):
    """No host is chosen when none matches the requested resource backend."""
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager(
        multibackend_with_pools=multibackend_with_pools)
    fakes.mock_host_manager_db_calls(
        _mock_service_get_all,
        backends_with_pools=multibackend_with_pools)
    ctxt = context.RequestContext('user', 'project')
    spec = {'volume_properties': {'project_id': 1, 'size': 1},
            'volume_type': {'name': 'LVM_iSCSI'},
            'resource_backend': resource_backend}
    self.assertIsNone(sched._schedule(ctxt, spec, {}))
def test_retry_disabled(self):
    """With a single max attempt, no retry info is populated."""
    self.flags(scheduler_max_attempts=1)
    sched = fakes.FakeFilterScheduler()
    spec = {'volume_type': {'name': 'LVM_iSCSI'},
            'volume_properties': {'project_id': 1, 'size': 1}}
    filter_properties = {}
    sched._schedule(self.context, spec,
                    filter_properties=filter_properties)
    # Re-scheduling is off, so no retry key should appear.
    self.assertNotIn("retry", filter_properties)
def test_schedule_happy_day(self, _mock_service_get_all_by_topic):
    """_schedule() returns a weighed host on the happy path."""
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
    spec = {'volume_type': {'name': 'LVM_iSCSI'},
            'volume_properties': {'project_id': 1, 'size': 1}}
    weighed = sched._schedule(ctxt, spec, {})
    self.assertIsNotNone(weighed.obj)
    self.assertTrue(_mock_service_get_all_by_topic.called)
def test_create_volume_host_same_as_resource(self, resource_backend,
                                             multibackend_with_pools,
                                             _mock_service_get_all):
    """The host matching the requested resource backend is retained.

    Covers create-from-source-volume/snapshot and create-within-group,
    where the new volume must land on the resource's backend.
    """
    sched = fakes.FakeFilterScheduler()
    sched.host_manager = fakes.FakeHostManager(
        multibackend_with_pools=multibackend_with_pools)
    fakes.mock_host_manager_db_calls(
        _mock_service_get_all,
        backends_with_pools=multibackend_with_pools)
    ctxt = context.RequestContext('user', 'project')
    spec = {'volume_properties': {'project_id': 1, 'size': 1},
            'volume_type': {'name': 'LVM_iSCSI'},
            'resource_backend': resource_backend}
    weighed = sched._schedule(ctxt, spec, {})
    self.assertIn(resource_backend, weighed.obj.host)
def test_max_attempts(self):
    """_max_attempts() reflects the scheduler_max_attempts flag."""
    self.flags(scheduler_max_attempts=4)
    sched = fakes.FakeFilterScheduler()
    self.assertEqual(4, sched._max_attempts())