def test_trusted_filter_combine_hosts(self, req_mock):
    """The attestation cache must poll every known compute node.

    Two compute nodes exist when the filter is constructed, so the single
    PollHosts request sent to the attestation service is expected to name
    both of them.
    """
    nodes = [
        objects.ComputeNode(hypervisor_hostname='node1'),
        objects.ComputeNode(hypervisor_hostname='node2'),
    ]
    with mock.patch('patron.objects.ComputeNodeList.get_all') as mocked:
        mocked.return_value = nodes
        self.filt_cls = trusted_filter.TrustedFilter()
    oat_data = {"hosts": [{"host_name": "node1",
                           "trust_lvl": "untrusted",
                           "vtime": "2012-09-09T05:10:40-04:00"}]}
    req_mock.return_value = requests.codes.OK, oat_data
    extra_specs = {'trust:trusted_host': 'trusted'}
    filter_properties = {'context': mock.sentinel.ctx,
                         'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    self.filt_cls.host_passes(host, filter_properties)  # Fill the caches

    self.assertTrue(req_mock.called)
    self.assertEqual(1, req_mock.call_count)
    call_args = list(req_mock.call_args[0])
    expected_call_args = ['POST', 'PollHosts', ['node2', 'node1']]
    self.assertJsonEqual(call_args, expected_call_args)
def fake_get_compute_nodes_in_db(context, use_slave=False):
    """Stand-in for the DB fetch: one powervm compute node as an object."""
    row = {
        'local_gb': 259,
        'vcpus_used': 0,
        'deleted': 0,
        'hypervisor_type': 'powervm',
        'created_at': '2013-04-01T00:27:06.000000',
        'local_gb_used': 0,
        'updated_at': '2013-04-03T00:35:41.000000',
        'hypervisor_hostname': 'fake_phyp1',
        'memory_mb_used': 512,
        'memory_mb': 131072,
        'current_workload': 0,
        'vcpus': 16,
        'cpu_info': 'ppc64,powervm,3940',
        'running_vms': 0,
        'free_disk_gb': 259,
        'service_id': 7,
        'hypervisor_version': 7,
        'disk_available_least': 265856,
        'deleted_at': None,
        'free_ram_mb': 130560,
        'metrics': '',
        'numa_topology': '',
        'stats': '',
        'id': 2,
        'host': 'fake_phyp1',
        'host_ip': '127.0.0.1',
    }
    # Convert the db-style dict into a proper ComputeNode object, just as
    # the real DB layer would.
    return [objects.ComputeNode._from_db_object(context,
                                                objects.ComputeNode(),
                                                row)]
def test_compute_node_get_all(self):
    """compute_node_get_all must flatten the per-cell responses, sorted
    by cell name, into a single list of compute-node proxies."""
    responses = []
    expected_response = []
    # 3 cells... so 3 responses.  Each response is a list of computes.
    # Manager should turn these into a single list of responses.
    # NOTE: use range() rather than the Python 2-only xrange(); for three
    # iterations the semantics are identical and the module stays
    # importable under Python 3.
    for i in range(3):
        cell_name = 'path!to!cell%i' % i
        compute_nodes = []
        for compute_node in FAKE_COMPUTE_NODES:
            fake_compute = objects.ComputeNode(**compute_node)
            fake_compute._cached_service = None
            compute_nodes.append(fake_compute)
            expected_compute_node = cells_utils.ComputeNodeProxy(
                fake_compute, cell_name)
            expected_response.append(
                (cell_name, expected_compute_node, fake_compute))
        response = messaging.Response(self.ctxt, cell_name,
                                      compute_nodes, False)
        responses.append(response)
    self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get_all')
    self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node')
    self.msg_runner.compute_node_get_all(
        self.ctxt, hypervisor_match='fake-match').AndReturn(responses)
    # Calls are done by cells, so we need to sort the list by the cell
    # name
    expected_response.sort(key=lambda k: k[0])
    for cell_name, compute_proxy, compute_node in expected_response:
        cells_utils.add_cell_to_compute_node(
            compute_node, cell_name).AndReturn(compute_proxy)
    self.mox.ReplayAll()
    response = self.cells_manager.compute_node_get_all(
        self.ctxt, hypervisor_match='fake-match')
    self.assertEqual(
        [proxy for cell, proxy, compute in expected_response], response)
def _fake_compute_node_get_all(self, context):
    """Stand-in for the DB API: return one minimal compute node."""
    node = objects.ComputeNode(id=1,
                               service_id=1,
                               host='fake',
                               cpu_info='cpu_info',
                               disk_available_least=100)
    return [node]
def setUp(self):
    """Build the filter under test with a single fake compute node."""
    super(TestTrustedFilter, self).setUp()
    # TrustedFilter's constructor creates the attestation cache, which
    # calls to get a list of all the compute nodes.
    nodes = [objects.ComputeNode(hypervisor_hostname='node1')]
    with mock.patch('patron.objects.ComputeNodeList.get_all') as mocked:
        mocked.return_value = nodes
        self.filt_cls = trusted_filter.TrustedFilter()
def test_obj_make_compatible_for_compute_node(self, get_all_by_host):
    """Backlevelling a Service to 1.9 embeds its compute node primitive
    pinned at ComputeNode version 1.10."""
    service_obj = objects.Service(context=self.context)
    primitive = fake_service.copy()
    compute_obj = objects.ComputeNode(host=fake_service['host'])
    get_all_by_host.return_value = [compute_obj]

    service_obj.obj_make_compatible(primitive, '1.9')

    expected = compute_obj.obj_to_primitive(target_version='1.10')
    self.assertEqual(expected, primitive['compute_node'])
def test_init_instance_info(self, mock_spawn, mock_get_all,
                            mock_get_by_filters):
    """_init_instance_info groups instances under their compute hosts."""
    # Run spawned work synchronously so the test sees its effects.
    mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
    nodes = [objects.ComputeNode(host='host1'),
             objects.ComputeNode(host='host2')]
    instances = [objects.Instance(host='host1', uuid='uuid1'),
                 objects.Instance(host='host1', uuid='uuid2'),
                 objects.Instance(host='host2', uuid='uuid3')]
    mock_get_all.return_value = objects.ComputeNodeList(objects=nodes)
    mock_get_by_filters.return_value = objects.InstanceList(
        objects=instances)

    hm = self.host_manager
    hm._instance_info = {}
    hm._init_instance_info()

    self.assertEqual(len(hm._instance_info), 2)
    host1_info = hm._instance_info['host1']
    self.assertIn('uuid1', host1_info['instances'])
    self.assertIn('uuid2', host1_info['instances'])
    self.assertNotIn('uuid3', host1_info['instances'])
def test_init_instance_info_batches(self, mock_spawn, mock_get_all,
                                    mock_get_by_filters):
    """Fetching instance info for 22 hosts is split into 3 DB queries."""
    # Run spawned work synchronously so the call count is observable.
    mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
    cn_list = objects.ComputeNodeList()
    for idx in range(22):
        cn_list.objects.append(objects.ComputeNode(host='host_%s' % idx))
    mock_get_all.return_value = cn_list

    self.host_manager._init_instance_info()

    self.assertEqual(mock_get_by_filters.call_count, 3)
def test_get_all_host_states_with_not_matching_aggs(
        self, svc_get_by_binary, cn_get_all, update_from_cn,
        mock_get_by_host):
    """An aggregate mapped only to 'other' must not leak into 'fake'."""
    svc_get_by_binary.return_value = [objects.Service(host='fake'),
                                      objects.Service(host='other')]
    cn_get_all.return_value = [
        objects.ComputeNode(host='fake', hypervisor_hostname='fake'),
        objects.ComputeNode(host='other', hypervisor_hostname='other'),
    ]
    mock_get_by_host.return_value = objects.InstanceList()
    fake_agg = objects.Aggregate(id=1)
    # Only 'other' belongs to aggregate 1.
    self.host_manager.host_aggregates_map = collections.defaultdict(
        set, {'other': set([1])})
    self.host_manager.aggs_by_id = {1: fake_agg}

    self.host_manager.get_all_host_states('fake-context')

    state = self.host_manager.host_state_map[('fake', 'fake')]
    self.assertEqual([], state.aggregates)
def setUp(self):
    """Unconditionally skipped via skip_msg; the fixtures below are kept
    in place for when the skip is lifted."""
    raise testtools.TestCase.skipException(skip_msg)
    super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
    cpu_info = collections.OrderedDict([
        ('arch', 'x86_64'),
        ('model', 'Nehalem'),
        ('vendor', 'Intel'),
        ('features', ['pge', 'clflush']),
        ('topology', {'cores': 1, 'threads': 1, 'sockets': 4}),
    ])
    self.fake_compute_node = objects.ComputeNode(
        cpu_info=jsonutils.dumps(cpu_info),
        current_workload=0,
        disk_available_least=0,
        host_ip="1.1.1.1",
        state="up",
        status="enabled",
        free_disk_gb=1028,
        free_ram_mb=7680,
        hypervisor_hostname="fake-mini",
        hypervisor_type="fake",
        hypervisor_version=1000,
        id=1,
        local_gb=1028,
        local_gb_used=0,
        memory_mb=8192,
        memory_mb_used=512,
        running_vms=0,
        vcpus=1,
        vcpus_used=0,
        service_id=2,
        host='043b3cacf6f34c90a7245151fc8ebcda',
        pci_device_pools=pci_device_pool.from_pci_stats({
            "count": 5,
            "vendor_id": "8086",
            "product_id": "1520",
            "keya": "valuea",
            "extra_info": {
                "phys_function": '[["0x0000", "0x04", "0x00", "0x1"]]',
                "key1": "value1",
            },
        }),
    )
    self.fake_service = objects.Service(
        id=2,
        host='043b3cacf6f34c90a7245151fc8ebcda',
        disabled=False,
        disabled_reason=None)
def test_add_cell_to_compute_node_no_service(self, mock_get_by_id):
    """A missing service surfaces as ServiceNotFound on proxy access."""
    compute = objects.ComputeNode(id=1, host='fake', service_id=1)
    mock_get_by_id.side_effect = exception.ServiceNotFound(service_id=1)
    cell_path = 'fake_path'

    proxy = cells_utils.add_cell_to_compute_node(compute, cell_path)

    self.assertIsInstance(proxy, cells_utils.ComputeNodeProxy)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
                     proxy.host)
    self.assertRaises(exception.ServiceNotFound,
                      getattr, proxy, 'service')
def test_proxy_object_serializer_to_primitive(self):
    """Serializing a proxy wraps the plain primitive with cell metadata."""
    obj = objects.ComputeNode(id=1, host='fake')
    proxy = cells_utils.ComputeNodeProxy(obj, 'fake_path')
    serializer = cells_utils.ProxyObjectSerializer()

    primitive = serializer.serialize_entity('ctx', proxy)

    self.assertIsInstance(primitive, dict)
    # Pop the proxy metadata; what remains must be the bare primitive.
    self.assertEqual('ComputeNodeProxy',
                     primitive.pop('cell_proxy.class_name'))
    self.assertEqual('fake_path', primitive.pop('cell_proxy.cell_path'))
    self.assertEqual(obj.obj_to_primitive(), primitive)
def test_add_cell_to_service_with_compute_node(self):
    """The service proxy must not expose its compute_node attribute."""
    svc = objects.Service(id=1, host='fake')
    svc.compute_node = objects.ComputeNode(id=1, host='fake')
    cell_path = 'fake_path'

    proxy = cells_utils.add_cell_to_service(svc, cell_path)

    self.assertIsInstance(proxy, cells_utils.ServiceProxy)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
                     proxy.host)
    self.assertRaises(AttributeError, getattr, proxy, 'compute_node')
def test_get_all_host_states_with_no_aggs(self, svc_get_by_binary,
                                          cn_get_all, update_from_cn,
                                          mock_get_by_host):
    """A host with no aggregate membership ends up with an empty list."""
    svc_get_by_binary.return_value = [objects.Service(host='fake')]
    cn_get_all.return_value = [
        objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
    mock_get_by_host.return_value = objects.InstanceList()
    self.host_manager.host_aggregates_map = collections.defaultdict(set)

    self.host_manager.get_all_host_states('fake-context')

    state = self.host_manager.host_state_map[('fake', 'fake')]
    self.assertEqual([], state.aggregates)
def test_compute_manager_removes_deleted_node(self):
    """update_available_resource deletes DB compute nodes that the
    driver no longer reports."""
    ctx = context.get_admin_context()
    fake.set_nodes(['A', 'B'])
    fake_compute_nodes = [
        objects.ComputeNode(context=ctx, hypervisor_hostname='A', id=2),
        objects.ComputeNode(context=ctx, hypervisor_hostname='B', id=3),
    ]

    def fake_get_compute_nodes_in_db(context, use_slave=False):
        return fake_compute_nodes

    def fake_compute_node_delete(context, compute_node_id):
        # Emulate the DB delete by dropping the matching node in place
        # (ids are unique, so at most one entry is removed).
        for cn in list(fake_compute_nodes):
            if cn.id == compute_node_id:
                fake_compute_nodes.remove(cn)

    self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
                   fake_get_compute_nodes_in_db)
    self.stubs.Set(db, 'compute_node_delete', fake_compute_node_delete)

    self.compute.update_available_resource(ctx)
    # Verify nothing is deleted if driver and db compute nodes match
    self.assertEqual(len(fake_compute_nodes), 2)
    self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                     ['A', 'B'])

    fake.set_nodes(['A'])
    self.compute.update_available_resource(ctx)
    # Verify B gets deleted since now only A is reported by driver
    self.assertEqual(len(fake_compute_nodes), 1)
    self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A')
    self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                     ['A'])
def test_proxy_object_serializer_from_primitive(self):
    """Deserializing a cell-tagged primitive rebuilds the proxy wrapper."""
    obj = objects.ComputeNode(id=1, host='fake')
    # Recreating the primitive by hand to isolate the test for only
    # the deserializing method
    primitive = obj.obj_to_primitive()
    primitive['cell_proxy.class_name'] = 'ComputeNodeProxy'
    primitive['cell_proxy.cell_path'] = 'fake_path'

    serializer = cells_utils.ProxyObjectSerializer()
    result = serializer.deserialize_entity('ctx', primitive)

    self.assertIsInstance(result, cells_utils.ComputeNodeProxy)
    self.assertEqual(obj.obj_to_primitive(),
                     result._obj.obj_to_primitive())
    self.assertEqual('fake_path', result._cell_path)
def test_stat_consumption_from_compute_node(self):
    """HostState absorbs stats, io workload and hypervisor details from
    a compute node record."""
    stats = {
        'num_instances': '5',
        'num_proj_12345': '3',
        'num_proj_23456': '1',
        'num_vm_%s' % vm_states.BUILDING: '2',
        'num_vm_%s' % vm_states.SUSPENDED: '1',
        'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
        'num_task_%s' % task_states.MIGRATING: '2',
        'num_os_type_linux': '4',
        'num_os_type_windoze': '1',
        'io_workload': '42',
    }
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    compute = objects.ComputeNode(
        stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
        local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
        disk_available_least=None, updated_at=None,
        host_ip='127.0.0.1', hypervisor_type='htype',
        hypervisor_hostname='hostname', cpu_info='cpu_info',
        supported_hv_specs=[], hypervisor_version=hyper_ver_int,
        numa_topology=None, pci_device_pools=None, metrics=None)

    state = host_manager.HostState("fakehost", "fakenode")
    state.update_from_compute_node(compute)

    self.assertEqual(5, state.num_instances)
    self.assertEqual(42, state.num_io_ops)
    self.assertEqual(10, len(state.stats))
    self.assertEqual('127.0.0.1', str(state.host_ip))
    self.assertEqual('htype', state.hypervisor_type)
    self.assertEqual('hostname', state.hypervisor_hostname)
    self.assertEqual('cpu_info', state.cpu_info)
    self.assertEqual([], state.supported_instances)
    self.assertEqual(hyper_ver_int, state.hypervisor_version)
def test_obj_make_compatible_with_juno_computes(self, get_all_by_host):
    """Backlevelling a compute Service to 1.4 must pin the embedded
    compute node primitive at version 1.5."""
    service_obj = objects.Service(context=self.context, **fake_service)
    service_obj.binary = 'patron-compute'
    primitive = fake_service.copy()
    primitive['binary'] = 'patron-compute'
    compute_obj = objects.ComputeNode(host=fake_service['host'])
    get_all_by_host.return_value = [compute_obj]

    # Juno versions :
    #    Service : 1.4
    #    ComputeNode : 1.5
    service_obj.obj_make_compatible(primitive, '1.4')

    self.assertEqual(
        '1.5', primitive['compute_node']['patron_object.version'])
def test_add_cell_to_compute_node_with_service(self, mock_get_by_id):
    """Both the compute proxy and its nested service proxy carry
    cell-qualified ids and hosts."""
    compute = objects.ComputeNode(id=1, host='fake', service_id=1)
    mock_get_by_id.return_value = objects.Service(id=1, host='fake-svc')
    cell_path = 'fake_path'

    proxy = cells_utils.add_cell_to_compute_node(compute, cell_path)

    self.assertIsInstance(proxy, cells_utils.ComputeNodeProxy)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
                     proxy.host)
    self.assertIsInstance(proxy.service, cells_utils.ServiceProxy)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 1),
                     proxy.service.id)
    self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake-svc'),
                     proxy.service.host)
def test_compute_node(self):
    """service.compute_node is lazy-loaded once and then cached."""
    compute_node = objects.ComputeNode._from_db_object(
        self.context, objects.ComputeNode(),
        test_compute_node.fake_compute_node)
    self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all_by_host')
    # Only ONE fetch is recorded; a second access must hit the cache.
    objects.ComputeNodeList.get_all_by_host(
        self.context, 'fake-host').AndReturn([compute_node])
    self.mox.ReplayAll()

    service_obj = service.Service(id=123, host="fake-host",
                                  binary="patron-compute")
    service_obj._context = self.context
    self.assertEqual(service_obj.compute_node, compute_node)
    # Make sure it doesn't re-fetch this
    service_obj.compute_node
def update_resource_stats(self, context, name, stats):
    """Creates or updates stats for the desired service.

    :param context: local context
    :param name: name of resource to update
    :type name: immutable (str or tuple)
    :param stats: updated stats to send to scheduler
    :type stats: dict
    :raises: exception.ComputeHostNotCreated if stats has no 'id' key,
             since without the primary key there is no record to update
    """
    if 'id' not in stats:
        raise exception.ComputeHostNotCreated(name=str(name))
    updates = stats.copy()
    compute_node_id = updates.pop('id')
    if 'stats' in updates:
        # NOTE(danms): This is currently pre-serialized for us,
        # which we don't want if we're using the object. So,
        # fix it here, and follow up with removing this when the
        # RT is converted to proper objects.
        updates['stats'] = jsonutils.loads(updates['stats'])
    compute_node = objects.ComputeNode(context=context,
                                       id=compute_node_id)
    compute_node.obj_reset_changes()
    for k, v in updates.items():
        if k == 'pci_device_pools':
            # NOTE(danms): Since the updates are actually the result of
            # a obj_to_primitive() on some real objects, we need to convert
            # back to a real object (not from_dict() or _from_db_object(),
            # which expect a db-formatted object) but just an attr-based
            # reconstruction. When we start getting a ComputeNode from
            # scheduler this "bandage" can go away.
            if v:
                devpools = [objects.PciDevicePool.from_dict(x) for x in v]
            else:
                devpools = []
            compute_node.pci_device_pools = objects.PciDevicePoolList(
                objects=devpools)
        else:
            setattr(compute_node, k, v)
    compute_node.save()
    # Pass the argument lazily instead of eagerly interpolating with '%'
    # so the message is only formatted when this log level is enabled.
    LOG.info(_LI('Compute_service record updated for '
                 '%s'), str(name))
def test_resources_consumption_from_compute_node(self):
    """HostState picks up monitor metrics and the serialized NUMA
    topology from a compute node record."""
    metrics = [
        dict(name='res1', value=1.0, source='source1', timestamp=None),
        dict(name='res2', value="string2", source='source2',
             timestamp=None),
    ]
    hyper_ver_int = utils.convert_version_to_int('6.0.0')
    compute = objects.ComputeNode(
        metrics=jsonutils.dumps(metrics), memory_mb=0, free_disk_gb=0,
        local_gb=0, local_gb_used=0, free_ram_mb=0, vcpus=0,
        vcpus_used=0, disk_available_least=None, updated_at=None,
        host_ip='127.0.0.1', hypervisor_type='htype',
        hypervisor_hostname='hostname', cpu_info='cpu_info',
        supported_hv_specs=[], hypervisor_version=hyper_ver_int,
        numa_topology=fakes.NUMA_TOPOLOGY._to_json(), stats=None,
        pci_device_pools=None)

    state = host_manager.HostState("fakehost", "fakenode")
    state.update_from_compute_node(compute)

    self.assertEqual(len(state.metrics), 2)
    self.assertEqual(set(['res1', 'res2']), set(state.metrics.keys()))
    self.assertEqual(1.0, state.metrics['res1'].value)
    self.assertEqual('source1', state.metrics['res1'].source)
    self.assertEqual('string2', state.metrics['res2'].value)
    self.assertEqual('source2', state.metrics['res2'].source)
    self.assertIsInstance(state.numa_topology, six.string_types)
def test_compute_node_get(self):
    """The manager proxies a single compute node fetched from one cell."""
    fake_cell = 'fake-cell'
    fake_compute = objects.ComputeNode(**FAKE_COMPUTE_NODES[0])
    fake_compute._cached_service = None
    fake_response = messaging.Response(self.ctxt, fake_cell,
                                       fake_compute, False)
    expected_response = cells_utils.ComputeNodeProxy(fake_compute,
                                                     fake_cell)
    cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')

    self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get')
    self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node')
    self.msg_runner.compute_node_get(
        self.ctxt, 'fake-cell', 'fake-id').AndReturn(fake_response)
    cells_utils.add_cell_to_compute_node(
        fake_compute, fake_cell).AndReturn(expected_response)
    self.mox.ReplayAll()

    response = self.cells_manager.compute_node_get(
        self.ctxt, compute_id=cell_and_id)
    self.assertEqual(expected_response, response)
def test_get_all_host_states_not_updated(self, mock_get_by_host,
                                         mock_get_all_comp,
                                         mock_get_svc_by_binary):
    """_add_instance_info refetches from the DB when cached info for a
    host is marked as not updated."""
    mock_get_all_comp.return_value = fakes.COMPUTE_NODES
    mock_get_svc_by_binary.return_value = fakes.SERVICES
    context = 'fake_context'
    hm = self.host_manager
    inst1 = objects.Instance(uuid='uuid1')
    cn1 = objects.ComputeNode(host='host1')
    # 'updated': False marks host1's cached info as stale.
    hm._instance_info = {'host1': {'instances': {'uuid1': inst1},
                                   'updated': False}}
    host_state = host_manager.HostState('host1', cn1)
    self.assertFalse(host_state.instances)
    mock_get_by_host.return_value = objects.InstanceList(objects=[inst1])

    hm._add_instance_info(context, cn1, host_state)

    mock_get_by_host.assert_called_once_with(context, cn1.host)
    self.assertTrue(host_state.instances)
    self.assertEqual(host_state.instances['uuid1'], inst1)
def setUp(self, mock_init_agg, mock_init_inst):
    """Prepare an IronicHostManager plus one baremetal compute node."""
    super(IronicHostManagerChangedNodesTestCase, self).setUp()
    self.host_manager = ironic_host_manager.IronicHostManager()
    ironic_driver = "patron.virt.ironic.driver.IronicDriver"
    supported_instances = [
        objects.HVSpec.from_list(["i386", "baremetal", "baremetal"])]
    self.compute_node = objects.ComputeNode(
        id=1, local_gb=10, memory_mb=1024, vcpus=1, vcpus_used=0,
        local_gb_used=0, memory_mb_used=0, updated_at=None,
        cpu_info='baremetal cpu',
        stats=dict(ironic_driver=ironic_driver, cpu_arch='i386'),
        supported_hv_specs=supported_instances,
        free_disk_gb=10, free_ram_mb=1024,
        hypervisor_type='ironic', hypervisor_version=1,
        hypervisor_hostname='fake_host')
def _node(host, total_mem, total_disk, free_mem, free_disk):
    """Build a minimal ComputeNode with the given capacity figures."""
    return objects.ComputeNode(host=host,
                               memory_mb=total_mem,
                               local_gb=total_disk,
                               free_ram_mb=free_mem,
                               free_disk_gb=free_disk)
NUMA_TOPOLOGY = objects.NUMATopology( cells=[objects.NUMACell( id=0, cpuset=set([1, 2]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=1, cpuset=set([3, 4]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) COMPUTE_NODES = [ objects.ComputeNode( id=1, local_gb=1024, memory_mb=1024, vcpus=1, disk_available_least=None, free_ram_mb=512, vcpus_used=1, free_disk_gb=512, local_gb_used=0, updated_at=None, host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1', hypervisor_version=0, numa_topology=None, hypervisor_type='foo', supported_hv_specs=[], pci_device_pools=None, cpu_info=None, stats=None, metrics=None), objects.ComputeNode( id=2, local_gb=2048, memory_mb=2048, vcpus=2, disk_available_least=1024, free_ram_mb=1024, vcpus_used=2, free_disk_gb=1024, local_gb_used=0, updated_at=None, host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1', hypervisor_version=0, numa_topology=None, hypervisor_type='foo', supported_hv_specs=[], pci_device_pools=None, cpu_info=None, stats=None, metrics=None), objects.ComputeNode( id=3, local_gb=4096, memory_mb=4096, vcpus=4, disk_available_least=3333, free_ram_mb=3072, vcpus_used=1, free_disk_gb=3072, local_gb_used=0, updated_at=None,
from patron.pci import device from patron import test from patron.tests.unit.api.openstack import fakes from patron.tests.unit.objects import test_pci_device pci_stats = [{ "count": 3, "vendor_id": "8086", "product_id": "1520", "numa_node": 1, "extra_info": { "phys_function": '[["0x0000", "0x04", ' '"0x00", "0x1"]]' } }] fake_compute_node = objects.ComputeNode( pci_device_pools=pci_device_pool.from_pci_stats(pci_stats)) class FakeResponse(wsgi.ResponseObject): pass class PciServerControllerTestV21(test.NoDBTestCase): def setUp(self): super(PciServerControllerTestV21, self).setUp() self.controller = pci.PciServerController() self.fake_obj = { 'server': { 'addresses': {}, 'id': 'fb08', 'name': 'a3',
report_count=5, disabled=False, disabled_reason=None, availability_zone="patron"), objects.Service(id=2, host="compute2", binary="patron-compute", topic="compute_topic", report_count=5, disabled=False, disabled_reason=None, availability_zone="patron"), ] TEST_HYPERS_OBJ = [ objects.ComputeNode(**hyper_dct) for hyper_dct in TEST_HYPERS ] TEST_HYPERS[0].update({'service': TEST_SERVICES[0]}) TEST_HYPERS[1].update({'service': TEST_SERVICES[1]}) TEST_SERVERS = [ dict(name="inst1", uuid="uuid1", host="compute1"), dict(name="inst2", uuid="uuid2", host="compute2"), dict(name="inst3", uuid="uuid3", host="compute1"), dict(name="inst4", uuid="uuid4", host="compute2") ] def fake_compute_node_get_all(context): return TEST_HYPERS_OBJ