def _group_filter_with_filter_not_configured(self, policy):
    """Scheduling must fail when the filter backing ``policy`` is absent.

    Only 'f1'/'f2' are enabled, so _setup_instance_group has no filter
    to enforce the group policy and must raise NoValidHost.
    """
    self.flags(scheduler_default_filters=['f1', 'f2'])
    sched = fakes.FakeFilterScheduler()
    instance = fake_instance.fake_instance_obj(self.context,
                                               params={'host': 'hostA'})
    group = objects.InstanceGroup()
    group.uuid = str(uuid.uuid4())
    group.members = [instance.uuid]
    group.policies = [policy]
    filter_properties = {
        'scheduler_hints': {
            'group': group.uuid,
        },
    }
    # contextlib.nested() is deprecated and removed in Python 3; a single
    # with-statement carrying both patches is behaviorally identical.
    with mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
                           return_value=group), \
            mock.patch.object(objects.InstanceGroup, 'get_hosts',
                              return_value=['hostA']):
        self.assertRaises(exception.NoValidHost,
                          sched._setup_instance_group,
                          self.context, filter_properties)
def _group_details_in_filter_properties(self, group, func='get_by_uuid',
                                        hint=None, policy=None):
    """Verify group hosts and policies are merged into filter_properties."""
    sched = fakes.FakeFilterScheduler()
    filter_properties = {
        'scheduler_hints': {
            'group': hint,
        },
        'group_hosts': ['hostB'],
    }
    # contextlib.nested() is deprecated and removed in Python 3; a single
    # with-statement carrying both patches is behaviorally identical.
    with mock.patch.object(objects.InstanceGroup, func,
                           return_value=group), \
            mock.patch.object(objects.InstanceGroup, 'get_hosts',
                              return_value=['hostA']):
        sched._supports_anti_affinity = True
        update_group_hosts = sched._setup_instance_group(
            self.context, filter_properties)
        self.assertTrue(update_group_hosts)
        # The mocked get_hosts() result is unioned with the caller's list.
        self.assertEqual(set(['hostA', 'hostB']),
                         filter_properties['group_hosts'])
        self.assertEqual([policy], filter_properties['group_policies'])
def test_retry_exceeded_max_attempts(self):
    # Exceeding scheduler_max_attempts must raise NoValidHost while the
    # request_spec keeps the instance_uuids needed for error handling.
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    uuids = ['fake-id']
    spec = {
        'instance_properties': {'project_id': '12345',
                                'os_type': 'Linux'},
        'instance_uuids': uuids,
    }
    props = {'retry': {'num_attempts': 2}}
    self.assertRaises(exception.NoValidHost,
                      scheduler.schedule_run_instance,
                      self.context, spec,
                      admin_password=None, injected_files=None,
                      requested_networks=None, is_first_time=False,
                      filter_properties=props,
                      legacy_bdm_in_spec=False)
    self.assertEqual(spec.get('instance_uuids'), uuids)
def test_run_instance_non_admin(self):
    """Run an instance with a user context; the host-state lookup must be
    performed with an elevated (admin) context while DB actions work.
    """
    self.was_admin = False

    def fake_get(context, *args, **kwargs):
        # Record whether the scheduler elevated to an admin context.
        self.was_admin = context.is_admin
        return {}

    scheduler = fakes.FakeFilterScheduler()
    self.stubs.Set(scheduler.host_manager, 'get_all_host_states', fake_get)
    user_ctxt = context.RequestContext('user', 'project')
    spec = {
        'instance_type': {'memory_mb': 1, 'local_gb': 1},
        'instance_properties': {'project_id': 1},
    }
    self.assertRaises(exception.NoValidHost,
                      scheduler.schedule_run_instance,
                      user_ctxt, spec, None)
    self.assertTrue(self.was_admin)
def test_prep_resize_post_populates_retry(self):
    # After a successful prep_resize the chosen [host, node] pair must be
    # recorded in the retry list of filter_properties.
    self.manager.driver = fakes.FakeFilterScheduler()
    image = 'image'
    instance_uuid = 'fake-instance-id'
    instance = fake_instance.fake_db_instance(uuid=instance_uuid)
    instance_type = "m1.tiny"
    request_spec = {
        'instance_properties': {'project_id': 'fake', 'os_type': 'Linux'},
        'instance_type': instance_type,
        'instance_uuids': [instance_uuid],
    }
    filter_properties = {'retry': {'hosts': [], 'num_attempts': 1}}
    reservations = None
    hosts = [dict(host='host', nodename='node', limits={})]
    self._mox_schedule_method_helper('select_destinations')
    self.manager.driver.select_destinations(
        self.context, request_spec, filter_properties).AndReturn(hosts)
    self.mox.StubOutWithMock(self.manager.compute_rpcapi, 'prep_resize')
    self.manager.compute_rpcapi.prep_resize(
        self.context, image, mox.IsA(instance_obj.Instance),
        instance_type, 'host', reservations, request_spec=request_spec,
        filter_properties=filter_properties, node='node')
    self.mox.ReplayAll()
    self.manager.prep_resize(self.context, image, request_spec,
                             filter_properties, instance, instance_type,
                             reservations)
    self.assertEqual([['host', 'node']],
                     filter_properties['retry']['hosts'])
def test_retry_attempt_two(self):
    # On a re-schedule, num_attempts must be bumped from 1 to 2 before
    # _schedule/_provision_resource are invoked.
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    spec = dict(instance_properties={}, instance_uuids=['fake-uuid1'])
    props = {'retry': {'num_attempts': 1}}
    expected_props = {'retry': {'num_attempts': 2}}
    self.mox.StubOutWithMock(scheduler, '_schedule')
    self.mox.StubOutWithMock(scheduler, '_provision_resource')
    scheduler._schedule(self.context, spec,
                        expected_props).AndReturn(['host1'])
    scheduler._provision_resource(
        self.context, 'host1', spec, expected_props,
        None, None, None, None,
        instance_uuid='fake-uuid1', legacy_bdm_in_spec=False)
    self.mox.ReplayAll()
    scheduler.schedule_run_instance(self.context, spec, None, None, None,
                                    None, props, False)
def _test_group_details_in_filter_properties(self, group, func, hint):
    """Verify anti-affinity group data is merged into filter_properties."""
    sched = fakes.FakeFilterScheduler()
    filter_properties = {
        'scheduler_hints': {
            'group': hint,
        },
        'group_hosts': ['hostB'],
    }
    # contextlib.nested() is deprecated and removed in Python 3; a single
    # with-statement carrying both patches is behaviorally identical.
    with mock.patch.object(instance_group_obj.InstanceGroup, func,
                           return_value=group), \
            mock.patch.object(instance_group_obj.InstanceGroup,
                              'get_hosts', return_value=['hostA']):
        update_group_hosts = sched._setup_instance_group(
            self.context, filter_properties)
        self.assertTrue(update_group_hosts)
        # The mocked get_hosts() result is unioned with the caller's list.
        self.assertEqual(set(['hostA', 'hostB']),
                         filter_properties['group_hosts'])
        self.assertEqual(['anti-affinity'],
                         filter_properties['group_policies'])
def test_run_instance_non_admin(self):
    self.was_admin = False

    def fake_get(context, *args, **kwargs):
        # Capture whether the scheduler elevated to an admin context.
        self.was_admin = context.is_admin
        return {}

    scheduler = fakes.FakeFilterScheduler()
    self.stubs.Set(scheduler.host_manager, 'get_all_host_states', fake_get)
    user_ctxt = context.RequestContext('user', 'project')
    uuid = 'fake-uuid1'
    request_spec = {
        'instance_type': {'memory_mb': 1, 'local_gb': 1},
        'instance_properties': {'project_id': 1, 'os_type': 'Linux'},
        'instance_uuids': [uuid],
    }
    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    # No host is found, so the instance is pushed to ERROR and a fault
    # record is created.
    old_ref, new_ref = db.instance_update_and_get_original(
        user_ctxt, uuid,
        {'vm_state': vm_states.ERROR,
         'task_state': None}).AndReturn(({}, {}))
    compute_utils.add_instance_fault_from_exc(
        user_ctxt, mox.IsA(conductor_api.LocalAPI), new_ref,
        mox.IsA(exception.NoValidHost), mox.IgnoreArg())
    self.mox.ReplayAll()
    scheduler.schedule_run_instance(user_ctxt, request_spec,
                                    None, None, None, None, {}, False)
    self.assertTrue(self.was_admin)
def test_group_details_in_filter_properties(self):
    """_setup_instance_group must surface group hosts and policies."""
    sched = fakes.FakeFilterScheduler()
    instance = fake_instance.fake_instance_obj(self.context,
                                               params={'host': 'hostA'})
    group = instance_group_obj.InstanceGroup()
    group.uuid = str(uuid.uuid4())
    group.members = [instance.uuid]
    group.policies = ['anti-affinity']
    filter_properties = {
        'scheduler_hints': {
            'group': group.uuid,
        },
    }
    # contextlib.nested() is deprecated and removed in Python 3; a single
    # with-statement carrying both patches is behaviorally identical.
    with mock.patch.object(instance_group_obj.InstanceGroup, 'get_by_uuid',
                           return_value=group), \
            mock.patch.object(instance_group_obj.InstanceGroup,
                              'get_hosts', return_value=['hostA']):
        update_group_hosts = sched._setup_instance_group(
            self.context, filter_properties)
        self.assertTrue(update_group_hosts)
        self.assertEqual(set(['hostA']), filter_properties['group_hosts'])
        self.assertEqual(['anti-affinity'],
                         filter_properties['group_policies'])
def test_prep_resize_post_populates_retry(self):
    """Prep resize should add a ('host', 'node') entry to the retry dict"""
    scheduler = fakes.FakeFilterScheduler()
    image = 'image'
    instance = db.instance_create(self.context, {})
    instance_type = instance_types.get_instance_type_by_name("m1.tiny")
    request_spec = {
        'instance_properties': {'project_id': 'fake', 'os_type': 'Linux'},
        'instance_type': instance_type,
    }
    filter_properties = {'retry': {'hosts': [], 'num_attempts': 1}}
    reservations = None
    state = fakes.FakeHostState('host', 'node', {})
    weighed_hosts = [weights.WeighedHost(state, 1)]
    self.mox.StubOutWithMock(scheduler, '_schedule')
    self.mox.StubOutWithMock(scheduler.compute_rpcapi, 'prep_resize')
    scheduler._schedule(self.context, request_spec, filter_properties,
                        [instance['uuid']]).AndReturn(weighed_hosts)
    scheduler.compute_rpcapi.prep_resize(
        self.context, image, instance, instance_type, 'host',
        reservations, request_spec=request_spec,
        filter_properties=filter_properties, node='node')
    self.mox.ReplayAll()
    scheduler.schedule_prep_resize(self.context, image, request_spec,
                                   filter_properties, instance,
                                   instance_type, reservations)
    self.assertEqual([('host', 'node')],
                     filter_properties['retry']['hosts'])
def test_run_instance_no_hosts(self):
    # With no compute nodes available the instance must be put into
    # ERROR state and an instance fault recorded.
    scheduler = fakes.FakeFilterScheduler()
    uuid = 'fake-uuid1'
    fake_context = context.RequestContext('user', 'project')
    request_spec = {
        'instance_type': {'memory_mb': 1, 'root_gb': 1,
                          'ephemeral_gb': 0},
        'instance_properties': {'project_id': 1, 'os_type': 'Linux'},
        'instance_uuids': [uuid],
    }
    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    old_ref, new_ref = db.instance_update_and_get_original(
        fake_context, uuid,
        {'vm_state': vm_states.ERROR,
         'task_state': None}).AndReturn(({}, {}))
    compute_utils.add_instance_fault_from_exc(
        fake_context, new_ref, mox.IsA(exception.NoValidHost),
        mox.IgnoreArg())
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.ReplayAll()
    scheduler.schedule_run_instance(fake_context, request_spec,
                                    None, None, None, None, {}, False)
def test_handles_deleted_instance(self):
    """Test instance deletion while being scheduled."""
    def _raise_instance_not_found(*args, **kwargs):
        raise exception.InstanceNotFound(instance_id='123')

    self.stubs.Set(driver, 'instance_update_db',
                   _raise_instance_not_found)
    scheduler = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project')
    state = host_manager.HostState('host2', 'node2')
    weighted_host = weights.WeighedHost(state, 1.42)
    filter_properties = {}
    request_spec = {
        'instance_type': {'memory_mb': 1, 'local_gb': 1},
        'instance_properties': {'project_id': 1, 'os_type': 'Linux'},
        'instance_uuids': ['fake-uuid1'],
    }
    # InstanceNotFound raised mid-provision must be absorbed, not leaked.
    scheduler._provision_resource(ctxt, weighted_host, request_spec,
                                  filter_properties, None, None, None,
                                  None)
def test_schedule_large_host_pool(self, mock_get_extra):
    """Hosts should still be chosen if pool size is larger than number
    of filtered hosts.
    """
    scheduler = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    self.flags(scheduler_host_subset_size=20)
    self.stubs.Set(scheduler.host_manager, 'get_filtered_hosts',
                   fake_get_filtered_hosts)
    fakes.mox_host_manager_db_calls(self.mox, ctxt)
    request_spec = dict(
        instance_properties={'project_id': 1, 'root_gb': 512,
                             'memory_mb': 512, 'ephemeral_gb': 0,
                             'vcpus': 1, 'os_type': 'Linux',
                             'uuid': 'fake-uuid'},
        instance_type={})
    filter_properties = {}
    self.mox.ReplayAll()
    hosts = scheduler._schedule(self.context, request_spec,
                                filter_properties=filter_properties)
    # exactly one host should be chosen
    self.assertEqual(len(hosts), 1)
def test_schedule_prep_resize_doesnt_update_host(self):
    # prep_resize must leave instance['host'] alone: the DB update hook
    # must never fire during scheduling of a resize.
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    scheduler = fakes.FakeFilterScheduler()

    def _return_hosts(*args, **kwargs):
        state = host_manager.HostState('host2', 'node2')
        return [weights.WeighedHost(state, 1.0)]

    self.stubs.Set(scheduler, '_schedule', _return_hosts)

    info = {'called': 0}

    def _fake_instance_update_db(*args, **kwargs):
        # This should not be called
        info['called'] = 1

    self.stubs.Set(driver, 'instance_update_db',
                   _fake_instance_update_db)
    instance = {'uuid': 'fake-uuid', 'host': 'host1'}
    scheduler.schedule_prep_resize(ctxt, {}, {}, {}, instance, {}, None)
    self.assertEqual(info['called'], 0)
def test_retry_force_nodes(self):
    # With force_nodes present, retry info must not be injected into the
    # filter properties even though re-scheduling is enabled.
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    spec = dict(instance_properties={}, instance_uuids=['fake-uuid1'])
    props = {'force_nodes': ['force_node']}
    self.mox.StubOutWithMock(scheduler, '_schedule')
    self.mox.StubOutWithMock(scheduler, '_provision_resource')
    scheduler._schedule(self.context, spec, props).AndReturn(['host1'])
    scheduler._provision_resource(
        self.context, 'host1', spec, props, None, None, None, None,
        instance_uuid='fake-uuid1', legacy_bdm_in_spec=False)
    self.mox.ReplayAll()
    scheduler.schedule_run_instance(self.context, spec, None, None, None,
                                    None, props, False)
def test_schedule_host_pool(self):
    """Make sure the scheduler_host_subset_size property works properly."""
    self.flags(scheduler_host_subset_size=2)
    scheduler = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    self.stubs.Set(scheduler.host_manager, 'get_filtered_hosts',
                   fake_get_filtered_hosts)
    fakes.mox_host_manager_db_calls(self.mox, ctxt)
    request_spec = dict(
        instance_properties={'project_id': 1, 'root_gb': 512,
                             'memory_mb': 512, 'ephemeral_gb': 0,
                             'vcpus': 1, 'os_type': 'Linux'},
        instance_type={})
    filter_properties = {}
    self.mox.ReplayAll()
    hosts = scheduler._schedule(self.context, request_spec,
                                filter_properties=filter_properties)
    # one host should be chosen
    self.assertEqual(len(hosts), 1)
def test_get_cost_functions(self):
    """The memory cost function must honour reserved_host_memory_mb."""
    self.flags(reserved_host_memory_mb=128)
    fixture = fakes.FakeFilterScheduler()
    fns = fixture.get_cost_functions()
    # assertEquals() is a deprecated alias; use assertEqual().
    self.assertEqual(len(fns), 1)
    weight, fn = fns[0]
    self.assertEqual(weight, -1.0)
    hostinfo = host_manager.HostState('host', 'compute')
    hostinfo.update_from_compute_node(
        dict(memory_mb=1000, local_gb=0, vcpus=1))
    # 1000 MB total minus the 128 MB reservation.
    self.assertEqual(1000 - 128, fn(hostinfo, {}))
def test_add_retry_host(self):
    # _add_retry_host should append the given host to retry['hosts'].
    filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
    target = "fakehost"
    scheduler = fakes.FakeFilterScheduler()
    scheduler._add_retry_host(filter_properties, target)
    recorded = filter_properties['retry']['hosts']
    self.assertEqual(1, len(recorded))
    self.assertEqual(target, recorded[0])
def test_get_cost_functions(self):
    """The default cost-function list holds one memory-based function."""
    fixture = fakes.FakeFilterScheduler()
    fns = fixture.get_cost_functions()
    # assertEquals() is a deprecated alias; use assertEqual().
    self.assertEqual(len(fns), 1)
    weight, fn = fns[0]
    self.assertEqual(weight, -1.0)
    hostinfo = host_manager.HostState('host', 'compute')
    hostinfo.update_from_compute_node(dict(memory_mb=1000, local_gb=0,
                                           vcpus=1,
                                           disk_available_least=1000,
                                           free_disk_mb=1000,
                                           free_ram_mb=872, vcpus_used=0,
                                           local_gb_used=0))
    # Cost tracks the node's free_ram_mb value.
    self.assertEqual(872, fn(hostinfo, {}))
def test_retry_exceeded_max_attempts(self):
    """Test for necessary explosion when max retries is exceeded"""
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    spec = dict(instance_properties={'project_id': '12345',
                                     'os_type': 'Linux'})
    props = dict(retry=dict(num_attempts=2))
    self.assertRaises(exception.NoValidHost, scheduler._schedule,
                      self.context, spec, filter_properties=props)
def test_select_destinations(self, mock_get_extra):
    """select_destinations is basically a wrapper around _schedule().

    Similar to the _schedule tests, this just does a happy path test to
    ensure there is nothing glaringly wrong.
    """
    self.next_weight = 1.0
    chosen_hosts = []
    chosen_nodes = []

    def _fake_weigh_objects(_self, functions, hosts, options):
        # Always elect the first host and remember what was chosen so
        # the result can be compared afterwards.
        self.next_weight += 2.0
        state = hosts[0]
        chosen_hosts.append(state.host)
        chosen_nodes.append(state.nodename)
        return [weights.WeighedHost(state, self.next_weight)]

    scheduler = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    self.stubs.Set(scheduler.host_manager, 'get_filtered_hosts',
                   fake_get_filtered_hosts)
    self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects',
                   _fake_weigh_objects)
    fakes.mox_host_manager_db_calls(self.mox, ctxt)
    request_spec = {
        'instance_type': {'memory_mb': 512, 'root_gb': 512,
                          'ephemeral_gb': 0, 'vcpus': 1},
        'instance_properties': {'project_id': 1, 'root_gb': 512,
                                'memory_mb': 512, 'ephemeral_gb': 0,
                                'vcpus': 1, 'os_type': 'Linux',
                                'uuid': 'fake-uuid'},
        'num_instances': 1,
    }
    self.mox.ReplayAll()
    dests = scheduler.select_destinations(ctxt, request_spec, {})
    self.assertEqual(dests[0]['host'], chosen_hosts[0])
    self.assertEqual(dests[0]['nodename'], chosen_nodes[0])
def test_retry_disabled(self):
    """Retry info should not get populated when re-scheduling is off."""
    self.flags(scheduler_max_attempts=1)
    sched = fakes.FakeFilterScheduler()
    instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
    request_spec = dict(instance_properties=instance_properties)
    filter_properties = {}
    sched._schedule(self.context, request_spec,
                    filter_properties=filter_properties)
    # assertNotIn gives a clearer failure message than
    # assertFalse("retry" in ...), and matches the style used elsewhere
    # in this file.
    self.assertNotIn('retry', filter_properties)
def test_retry_attempt_one(self):
    """Test retry logic on initial scheduling attempt"""
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    spec = dict(instance_properties={'project_id': '12345',
                                     'os_type': 'Linux'})
    props = {}
    scheduler._schedule(self.context, spec, filter_properties=props)
    # The first pass counts as attempt number one.
    self.assertEqual(1, props['retry']['num_attempts'])
def test_post_select_populate(self):
    """Test addition of certain filter props after a host is selected"""
    filter_properties = {'retry': {'hosts': [], 'num_attempts': 1}}
    scheduler = fakes.FakeFilterScheduler()
    state = host_manager.HostState('host', 'node')
    state.limits['vcpus'] = 5
    scheduler._post_select_populate_filter_properties(filter_properties,
                                                      state)
    # The chosen host lands in the retry list; host limits stay intact.
    self.assertEqual('host', filter_properties['retry']['hosts'][0])
    self.assertEqual({'vcpus': 5}, state.limits)
def test_retry_exceeded_max_attempts(self):
    # Exceeding scheduler_max_attempts must raise NoValidHost; the
    # request_spec retains the data needed for error handling.
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    spec = dict(instance_properties={}, instance_uuids=['fake-uuid1'])
    props = {'retry': {'num_attempts': 2}}
    self.mox.ReplayAll()
    self.assertRaises(exception.NoValidHost,
                      scheduler.schedule_run_instance, self.context,
                      spec, None, None, None, None, props, False)
def test_schedule_chooses_best_host(self, mock_get_extra):
    """If scheduler_host_subset_size is 1, the largest host with greatest
    weight should be returned.
    """
    self.flags(scheduler_host_subset_size=1)
    scheduler = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    self.stubs.Set(scheduler.host_manager, 'get_filtered_hosts',
                   fake_get_filtered_hosts)
    fakes.mox_host_manager_db_calls(self.mox, ctxt)
    self.next_weight = 50

    def _fake_weigh_objects(_self, functions, hosts, options):
        # The first weighed host gets 50; every subsequent one gets 0,
        # so the best host is unambiguous.
        this_weight = self.next_weight
        self.next_weight = 0
        return [weights.WeighedHost(hosts[0], this_weight)]

    request_spec = dict(
        instance_properties={'project_id': 1, 'root_gb': 512,
                             'memory_mb': 512, 'ephemeral_gb': 0,
                             'vcpus': 1, 'os_type': 'Linux',
                             'uuid': 'fake-uuid'},
        instance_type={})
    self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects',
                   _fake_weigh_objects)
    filter_properties = {}
    self.mox.ReplayAll()
    hosts = scheduler._schedule(self.context, request_spec,
                                filter_properties=filter_properties)
    # one host should be chosen, carrying the maximal weight
    self.assertEqual(1, len(hosts))
    self.assertEqual(50, hosts[0].weight)
def test_retry_attempt_two(self):
    """Test retry logic when re-scheduling"""
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    spec = dict(instance_properties={'project_id': '12345',
                                     'os_type': 'Linux'})
    props = dict(retry=dict(num_attempts=1))
    scheduler._schedule(self.context, spec, filter_properties=props)
    # Re-scheduling bumps the attempt counter to two.
    self.assertEqual(2, props['retry']['num_attempts'])
def test_select_hosts_happy_day(self):
    """select_hosts is basically a wrapper around the _select() method.

    Similar to the _select tests, this just does a happy path test to
    ensure there is nothing glaringly wrong.
    """
    self.next_weight = 1.0
    selected_hosts = []

    def _fake_weigh_objects(_self, functions, hosts, options):
        # Always elect the first host; record choices for comparison.
        self.next_weight += 2.0
        host_state = hosts[0]
        selected_hosts.append(host_state.host)
        return [weights.WeighedHost(host_state, self.next_weight)]

    sched = fakes.FakeFilterScheduler()
    fake_context = context.RequestContext('user', 'project',
                                          is_admin=True)
    self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
                   fake_get_filtered_hosts)
    self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects',
                   _fake_weigh_objects)
    fakes.mox_host_manager_db_calls(self.mox, fake_context)
    request_spec = {
        'num_instances': 10,
        'instance_type': {'memory_mb': 512, 'root_gb': 512,
                          'ephemeral_gb': 0, 'vcpus': 1},
        'instance_properties': {'project_id': 1, 'root_gb': 512,
                                'memory_mb': 512, 'ephemeral_gb': 0,
                                'vcpus': 1, 'os_type': 'Linux'},
    }
    self.mox.ReplayAll()
    hosts = sched.select_hosts(fake_context, request_spec, {})
    # assertEquals() is a deprecated alias; use assertEqual().
    self.assertEqual(len(hosts), 10)
    self.assertEqual(hosts, selected_hosts)
def test_retry_force_nodes(self):
    # Retry info should not get populated when re-scheduling is off.
    self.flags(scheduler_max_attempts=2)
    scheduler = fakes.FakeFilterScheduler()
    spec = dict(instance_properties={'project_id': '12345',
                                     'os_type': 'Linux'})
    props = dict(force_nodes=['force_node'])
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.ReplayAll()
    scheduler._schedule(self.context, spec, filter_properties=props)
    # should not have retry info in the populated filter properties:
    self.assertNotIn("retry", props)
def test_schedule_happy_day(self, mock_get_extra):
    """Make sure there's nothing glaringly wrong with _schedule() by
    doing a happy day pass through.
    """
    self.next_weight = 1.0

    def _fake_weigh_objects(_self, functions, hosts, options):
        # Always elect the first host with an ever-increasing weight.
        self.next_weight += 2.0
        return [weights.WeighedHost(hosts[0], self.next_weight)]

    scheduler = fakes.FakeFilterScheduler()
    ctxt = context.RequestContext('user', 'project', is_admin=True)
    self.stubs.Set(scheduler.host_manager, 'get_filtered_hosts',
                   fake_get_filtered_hosts)
    self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects',
                   _fake_weigh_objects)
    fakes.mox_host_manager_db_calls(self.mox, ctxt)
    request_spec = {
        'num_instances': 10,
        'instance_type': {'memory_mb': 512, 'root_gb': 512,
                          'ephemeral_gb': 0, 'vcpus': 1},
        'instance_properties': {'project_id': 1, 'root_gb': 512,
                                'memory_mb': 512, 'ephemeral_gb': 0,
                                'vcpus': 1, 'os_type': 'Linux',
                                'uuid': 'fake-uuid'},
    }
    self.mox.ReplayAll()
    weighed_hosts = scheduler._schedule(ctxt, request_spec, {})
    self.assertEqual(len(weighed_hosts), 10)
    for weighed_host in weighed_hosts:
        self.assertIsNotNone(weighed_host.obj)