def test_run_instance_no_hosts(self):
    def _fake_empty_call_zone_method(*args, **kwargs):
        return []

    sched = fakes.FakeFilterScheduler()
    uuid = 'fake-uuid1'
    fake_context = context.RequestContext('user', 'project')
    instance_properties = {'project_id': 1, 'os_type': 'Linux'}
    request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
                                      'ephemeral_gb': 0},
                    'instance_properties': instance_properties,
                    'instance_uuids': [uuid]}

    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    old_ref, new_ref = db.instance_update_and_get_original(
        fake_context, uuid,
        {'vm_state': vm_states.ERROR,
         'task_state': None}).AndReturn(({}, {}))
    compute_utils.add_instance_fault_from_exc(
        fake_context, mox.IsA(conductor_api.LocalAPI), new_ref,
        mox.IsA(exception.NoValidHost), mox.IgnoreArg())

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])

    self.mox.ReplayAll()
    sched.schedule_run_instance(
        fake_context, request_spec, None, None, None, None, {})

def testProcessUpdates_compute_stopped_exception(self):
    vmHost = VmHost()
    vmHost.set_id('1')
    vmHost.set_connectionState(Constants.VMHOST_CONNECTED)
    InventoryCacheManager.update_object_in_cache('1', vmHost)
    self.mock.StubOutWithMock(api, 'vm_host_save')
    api.vm_host_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVmHost.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    fake_computes = [{'id': '1',
                      'service': {'created_at': 'created',
                                  'updated_at': 'updated'}}]
    self.mock.StubOutWithMock(novadb, 'compute_node_get_all')
    novadb.compute_node_get_all(mox.IgnoreArg()).AndReturn(fake_computes)
    self.mock.StubOutWithMock(hnm_utils, 'is_service_alive')
    hnm_utils.is_service_alive(
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False)
    self.mock.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(
        mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(Exception())
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVmHost.processUpdates(), None)
    self.mock.stubs.UnsetAll()

def mox_host_manager_db_calls(mock, context):
    mock.StubOutWithMock(db, 'compute_node_get_all')
    mock.StubOutWithMock(db, 'instance_get_all')

    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
    db.instance_get_all(
        mox.IgnoreArg(),
        columns_to_join=['instance_type']).AndReturn(INSTANCES)

def test_run_instance_no_hosts(self):
    def _fake_empty_call_zone_method(*args, **kwargs):
        return []

    sched = fakes.FakeFilterScheduler()
    uuid = "fake-uuid1"
    fake_context = context.RequestContext("user", "project")
    instance_properties = {"project_id": 1, "os_type": "Linux"}
    request_spec = {
        "instance_type": {"memory_mb": 1, "root_gb": 1, "ephemeral_gb": 0},
        "instance_properties": instance_properties,
        "instance_uuids": [uuid],
    }

    self.mox.StubOutWithMock(compute_utils, "add_instance_fault_from_exc")
    self.mox.StubOutWithMock(db, "instance_update_and_get_original")
    old_ref, new_ref = db.instance_update_and_get_original(
        fake_context, uuid,
        {"vm_state": vm_states.ERROR, "task_state": None}
    ).AndReturn(({}, {}))
    compute_utils.add_instance_fault_from_exc(
        fake_context, mox.IsA(conductor_api.LocalAPI), new_ref,
        mox.IsA(exception.NoValidHost), mox.IgnoreArg()
    )

    self.mox.StubOutWithMock(db, "compute_node_get_all")
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])

    self.mox.ReplayAll()
    sched.schedule_run_instance(
        fake_context, request_spec, None, None, None, None, {}, False)

def test_get_all_host_states(self):
    # Ensure .service is set and we have the values we expect to.
    context = "fake_context"

    self.mox.StubOutWithMock(db, "compute_node_get_all")
    db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()

    self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map

    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in range(4):
        compute_node = ironic_fakes.COMPUTE_NODES[i]
        host = compute_node["service"]["host"]
        node = compute_node["hypervisor_hostname"]
        state_key = (host, node)
        self.assertEqual(compute_node["service"],
                         host_states_map[state_key].service)
    # check we have the values we think we should.
    self.assertEqual(1024,
                     host_states_map[("host1", "node1uuid")].free_ram_mb)
    self.assertEqual(10240,
                     host_states_map[("host1", "node1uuid")].free_disk_mb)
    self.assertEqual(2048,
                     host_states_map[("host2", "node2uuid")].free_ram_mb)
    self.assertEqual(20480,
                     host_states_map[("host2", "node2uuid")].free_disk_mb)
    self.assertEqual(3072,
                     host_states_map[("host3", "node3uuid")].free_ram_mb)
    self.assertEqual(30720,
                     host_states_map[("host3", "node3uuid")].free_disk_mb)
    self.assertEqual(4096,
                     host_states_map[("host4", "node4uuid")].free_ram_mb)
    self.assertEqual(40960,
                     host_states_map[("host4", "node4uuid")].free_disk_mb)

def test_get_all(self):
    self.mox.StubOutWithMock(db, "compute_node_get_all")
    db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
    self.mox.ReplayAll()
    computes = compute_node.ComputeNodeList.get_all(self.context)
    self.assertEqual(1, len(computes))
    self.compare_obj(computes[0], fake_compute_node,
                     subs=self.subs(),
                     comparators=self.comparators())

def test_get_all(self):
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
    self.mox.ReplayAll()
    computes = compute_node.ComputeNodeList.get_all(self.context)
    self.assertEqual(1, len(computes))
    self._compare(computes[0], fake_compute_node)

def test_get_all_host_states(self):
    context = 'fake_context'
    topic = 'compute'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warn')

    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # Invalid service
    host_manager.LOG.warn("No service for compute ID 5")

    self.mox.ReplayAll()
    host_states = self.host_manager.get_all_host_states(context, topic)

    self.assertEqual(len(host_states), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        self.assertEqual(host_states[host].service,
                         compute_node['service'])
    self.assertEqual(host_states['host1'].free_ram_mb, 512)
    # 511GB
    self.assertEqual(host_states['host1'].free_disk_mb, 524288)
    self.assertEqual(host_states['host2'].free_ram_mb, 1024)
    # 1023GB
    self.assertEqual(host_states['host2'].free_disk_mb, 1048576)
    self.assertEqual(host_states['host3'].free_ram_mb, 3072)
    # 3071GB
    self.assertEqual(host_states['host3'].free_disk_mb, 3145728)
    self.assertEqual(host_states['host4'].free_ram_mb, 8192)
    # 8191GB
    self.assertEqual(host_states['host4'].free_disk_mb, 8388608)

def test_get_all_host_states(self):
    # Ensure .service is set and we have the values we expect to.
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map

    self.assertEqual(len(host_states_map), 4)
    for i in range(4):
        compute_node = ironic_fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(compute_node['service'],
                         host_states_map[state_key].service)
        self.assertEqual(jsonutils.loads(compute_node['stats']),
                         host_states_map[state_key].stats)
        self.assertEqual(compute_node['free_ram_mb'],
                         host_states_map[state_key].free_ram_mb)
        self.assertEqual(compute_node['free_disk_gb'] * 1024,
                         host_states_map[state_key].free_disk_mb)

def test_host_removed_event(self):
    self.__mock_service_get_all_by_topic()
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()

    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)

    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_HOST_REMOVED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'VmHost')
    self.assertEquals(payload['entity_id'], deleted_host.id)

def test_get_all(self):
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
    self.mox.ReplayAll()
    computes = compute_node.ComputeNodeList.get_all(self.context)
    self.assertEqual(1, len(computes))
    self.compare_obj(computes[0], fake_compute_node,
                     comparators={'stats': self.json_comparator,
                                  'host_ip': self.str_comparator})

def test_get_all_host_states(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 4)

def test_get_all(self):
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
    self.mox.ReplayAll()
    computes = compute_node.ComputeNodeList.get_all(self.context)
    self.assertEqual(1, len(computes))
    self.compare_obj(computes[0], fake_compute_node,
                     subs=self.subs(),
                     comparators=self.comparators())

def test_get_all_host_states(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warning')

    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # node 3 host physical disk space is greater than database
    host_manager.LOG.warning(_LW("Host %(hostname)s has more disk space "
                                 "than database expected (%(physical)sgb >"
                                 " %(database)sgb)"),
                             {'physical': 3333, 'database': 3072,
                              'hostname': 'node3'})
    # Invalid service
    host_manager.LOG.warning(_LW("No service for compute ID %s"), 5)
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map

    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(host_states_map[state_key].service,
                         compute_node['service'])
    self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb, 512)
    # 511GB
    self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                     524288)
    self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                     1024)
    # 1023GB
    self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                     1048576)
    self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                     3072)
    # 3071GB
    self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                     3145728)
    self.assertThat(
        objects.NUMATopology.obj_from_db_obj(
            host_states_map[('host3', 'node3')].numa_topology)._to_dict(),
        matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
    self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                     8192)
    # 8191GB
    self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                     8388608)

def test_get_all_host_states(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warn')

    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # node 3 host physical disk space is greater than database
    host_manager.LOG.warn(_LW("Host %(hostname)s has more disk space than "
                              "database expected (%(physical)sgb > "
                              "%(database)sgb)"),
                          {'physical': 3333, 'database': 3072,
                           'hostname': 'node3'})
    # Invalid service
    host_manager.LOG.warn(_LW("No service for compute ID %s"), 5)
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map

    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(host_states_map[state_key].service,
                         compute_node['service'])
    self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb, 512)
    # 511GB
    self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                     524288)
    self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                     1024)
    # 1023GB
    self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                     1048576)
    self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                     3072)
    # 3071GB
    self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                     3145728)
    self.assertThat(
        hardware.VirtNUMAHostTopology.from_json(
            host_states_map[('host3', 'node3')].numa_topology)._to_dict(),
        matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
    self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                     8192)
    # 8191GB
    self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                     8388608)

def test_get_all_host_states(self):
    context = "fake_context"

    self.mox.StubOutWithMock(db, "compute_node_get_all")
    db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()

    self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(4, len(host_states_map))

def test_get_all_host_states(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()

    self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(4, len(host_states_map))

def test_host_removed_event_none_host(self):
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()

    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    InventoryCacheManager.get_inventory_cache()[Constants.VmHost][
        deleted_host.get_id()] = None

    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)

def test_get_all(self):
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
    self.mox.ReplayAll()
    computes = compute_node.ComputeNodeList.get_all(self.context)
    self.assertEqual(1, len(computes))
    self.compare_obj(computes[0], fake_compute_node,
                     comparators={'stats': self.json_comparator,
                                  'host_ip': self.str_comparator})

def test_host_removed_event_none_host(self):
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager,
                             'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()

    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    InventoryCacheManager.get_inventory_cache()[Constants.VmHost][
        deleted_host.get_id()] = None

    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)

def test_get_all_host_states_after_delete_all(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    # all nodes active for first call
    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # remove all nodes for second call
    db.compute_node_get_all(context).AndReturn([])
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 0)

def test_get_all_host_states_after_delete_all(self):
    context = "fake_context"

    self.mox.StubOutWithMock(db, "compute_node_get_all")
    # all nodes active for first call
    db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
    # remove all nodes for second call
    db.compute_node_get_all(context).AndReturn([])
    self.mox.ReplayAll()

    self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
    self.host_manager.get_all_host_states(context)
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(0, len(host_states_map))

def test_get_all_host_states_after_delete_one(self):
    context = "fake_context"

    self.mox.StubOutWithMock(db, "compute_node_get_all")
    # all nodes active for first call
    db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
    # remove node4 for second call
    running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
                     if n.get("hypervisor_hostname") != "node4uuid"]
    db.compute_node_get_all(context).AndReturn(running_nodes)
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(3, len(host_states_map))

def get_all_host_states(self, context):
    """Returns a list of HostStates that represents all the hosts
    the HostManager knows about. Also, each of the consumable resources
    in HostState are pre-populated and adjusted based on data in the db.
    """

    # Get resource usage across the available compute nodes:
    compute_nodes = db.compute_node_get_all(context)
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            LOG.warn(_("No service for compute ID %s") % compute['id'])
            continue
        host = service['host']
        node = compute.get('hypervisor_hostname')
        state_key = (host, node)
        capabilities = self.service_states.get(state_key, None)
        host_state = self.host_state_map.get(state_key)
        if host_state:
            host_state.update_capabilities(capabilities,
                                           dict(service.iteritems()))
        else:
            host_state = self.host_state_cls(
                host, node,
                capabilities=capabilities,
                service=dict(service.iteritems()))
            self.host_state_map[state_key] = host_state
        host_state.update_from_compute_node(compute)

    return self.host_state_map.itervalues()

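# Illustrative sketch, not from the original sources: the docstring above
# describes get_all_host_states() returning HostState objects keyed by
# (host, node), with consumable resources already populated from the DB.
# A scheduler-side caller might consume the returned iterator roughly like
# this; `host_manager`, `ctxt` and `min_free_mb` are hypothetical names, and
# only attributes exercised by the tests in this collection (free_ram_mb,
# host) are relied upon.
def _hosts_with_free_ram(host_manager, ctxt, min_free_mb=512):
    # Collect hosts that still report at least min_free_mb of free RAM.
    viable = []
    for state in host_manager.get_all_host_states(ctxt):
        if state.free_ram_mb >= min_free_mb:
            viable.append(state.host)
    return viable
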
def _refresh_from_db(self, context):
    """Make our compute_node inventory map match the db."""

    # Add/update existing compute_nodes ...
    computes = db.compute_node_get_all(context)
    existing = InventoryCacheManager.get_all_compute_inventory().keys()
    db_keys = []
    for compute in computes:
        compute_id = str(compute['id'])
        service = compute['service']
        if service is not None:
            compute_alive = hnm_utils.is_service_alive(
                service['updated_at'], service['created_at'])
            db_keys.append(compute_id)
            if not compute_alive:
                LOG.warn(_('Service %s for host %s is not active') % (
                    service['binary'], service['host']))
                # continue
            if compute_id not in existing:
                self._add_compute_to_inventory(
                    compute['hypervisor_type'], compute_id,
                    service['host'])
                LOG.audit(_('New Host with compute_id %s is obtained')
                          % (compute_id))
                InventoryCacheManager.get_all_compute_inventory()[
                    compute_id].update_compute_Id(compute_id)
        else:
            LOG.warn(_('No services entry found for compute id %s')
                     % compute_id)

    # Cleanup compute_nodes removed from db ...
    self._clean_deleted_computes(db_keys)

def get_node_id_from_name(self, context, physerver):
    node_ref = db.compute_node_get_all(context)
    for node in node_ref:
        if physerver in node["host"]:
            return node["id"]
    LOG.error("get_node_id_from_name fail, physerver= %s" % physerver)
    return -1

def get_compute_nodes_from_DB(context, msg_dict=None):
    """
    This returns a list of compute nodes after querying the Nova DB.

    :param context: A context object that is used to authorize
                    the DB access.
    :returns: A list of compute nodes that are in service
    """
    context = context.elevated()  # What is the purpose of elevation?
    compute_nodes = nova_db.compute_node_get_all(context)
    return_nodes = []
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            msg = _("No service entry for compute ID %s.") % compute['id']
            LOG.warn(msg)
            if msg_dict:
                msg_dict['messages'].append(msg)
            continue
        return_nodes.append(service["host"])
        # host_to_send = {'db_id': compute['id'],
        #                 'host_name': service['host'],
        #                 'hyp_hostname': compute['hypervisor_hostname']}
    LOG.debug("db_hosts: %s" % return_nodes)
    return return_nodes

def _get_ip(self, context, host_name):
    compute_nodes = db.compute_node_get_all(context)
    # Initialize ip so a host_name with no matching node returns None
    # instead of raising NameError.
    ip = None
    for node in compute_nodes:
        if node.get('hypervisor_hostname') == host_name:
            ip = node.get('host_ip')
    return ip

def __init__(self):
    '''
    TODO:
        a. Get information about checks from Ceilometer
        b. Initialize adapters for each check
    '''
    admin = context.get_admin_context()
    # all compute nodes
    self.compute_nodes = {}
    # set flag to show that periodic checks are now running
    PeriodicChecks.periodic_tasks_running = True
    # get all adapters
    self.adapter_handler = adapters.AdapterHandler()
    # all adapters in the adapters folder
    self._get_all_adapters()
    # test code
    self.check_times = 1
    # start checks
    self.run_checks({})
    computes = db.compute_node_get_all(admin)
    for compute in computes:
        service = compute['service']
        host = service['host']
        self._init_cache_entry(host)

def get_all_host_states(self, context):
    """Returns a list of HostStates that represents all the hosts
    the HostManager knows about. Also, each of the consumable resources
    in HostState are pre-populated and adjusted based on data in the db.
    """

    # Get resource usage across the available compute nodes:
    compute_nodes = db.compute_node_get_all(context)
    seen_nodes = set()
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            LOG.warning(_LW("No service for compute ID %s"),
                        compute['id'])
            continue
        host = service['host']
        node = compute.get('hypervisor_hostname')
        state_key = (host, node)
        host_state = self.host_state_map.get(state_key)
        if host_state:
            host_state.update_from_compute_node(compute)
        else:
            host_state = self.host_state_cls(host, node, compute=compute)
            self.host_state_map[state_key] = host_state
        host_state.update_service(dict(service.iteritems()))
        seen_nodes.add(state_key)

    # remove compute nodes from host_state_map if they are not active
    dead_nodes = set(self.host_state_map.keys()) - seen_nodes
    for state_key in dead_nodes:
        host, node = state_key
        LOG.info(_LI("Removing dead compute node %(host)s:%(node)s "
                     "from scheduler"), {'host': host, 'node': node})
        del self.host_state_map[state_key]

    return self.host_state_map.itervalues()

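# Illustrative note, not from the original sources: unlike the earlier
# variant, this version prunes host_state_map entries whose (host, node)
# key disappeared from the DB, which is exactly what the
# test_get_all_host_states_after_delete_* tests in this collection
# exercise. Assuming a host_manager whose DB returns a shrinking node list:
#
#     host_manager.get_all_host_states(ctxt)  # N rows  -> N map entries
#     # ...a node is removed from the compute_nodes table...
#     host_manager.get_all_host_states(ctxt)  # N-1 rows -> dead key removed
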
def test_get_all_host_states_after_delete_one(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    # all nodes active for first call
    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # remove node4 for second call
    running_nodes = [n for n in fakes.COMPUTE_NODES
                     if n.get('hypervisor_hostname') != 'node4']
    db.compute_node_get_all(context).AndReturn(running_nodes)
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 3)

def _initCache(self):
    # Read from DB all the vmHost objects and populate
    # the cache for each IP if cache is empty
    LOG.info(_('Entering into initCache'))
    computes = db.compute_node_get_all(get_admin_context())
    for compute in computes:
        compute_id = str(compute['id'])
        service = compute['service']
        self._add_compute_to_inventory(compute['hypervisor_type'],
                                       compute_id, service['host'])

    vmhosts = api.vm_host_get_all(get_admin_context())
    vms = api.vm_get_all(get_admin_context())
    storageVolumes = api.storage_volume_get_all(get_admin_context())
    subNets = api.subnet_get_all(get_admin_context())
    self._updateInventory(vmhosts)
    self._updateInventory(vms)
    self._updateInventory(storageVolumes)
    self._updateInventory(subNets)

    LOG.info(_('Hosts obtained from db: %s') % str(len(vmhosts)))
    LOG.info(_('Vms obtained from db: %s') % str(len(vms)))
    LOG.info(_('Storage volumes obtained from db: %s')
             % str(len(storageVolumes)))
    LOG.info(_('Subnets obtained from db: %s') % str(len(subNets)))
    LOG.info(_('Completed the initCache method'))

def get_all_host_states(self, context, topic):
    """Returns a list of HostStates that represents all the hosts
    the HostManager knows about. Also, each of the consumable resources
    in HostState are pre-populated and adjusted based on data in the db.

    Note: this can be very slow with a lot of instances.
    InstanceType table isn't required since a copy is stored with the
    instance (in case the InstanceType changed since the instance was
    created).
    """
    if topic != CONF.compute_topic:
        raise NotImplementedError(
            _("host_manager only implemented for 'compute'"))

    # Get resource usage across the available compute nodes:
    compute_nodes = db.compute_node_get_all(context)
    for compute in compute_nodes:
        service = compute["service"]
        if not service:
            LOG.warn(_("No service for compute ID %s") % compute["id"])
            continue
        host = service["host"]
        capabilities = self.service_states.get(host, None)
        host_state = self.host_state_map.get(host)
        if host_state:
            host_state.update_capabilities(topic, capabilities,
                                           dict(service.iteritems()))
        else:
            host_state = self.host_state_cls(
                host, topic,
                capabilities=capabilities,
                service=dict(service.iteritems())
            )
            self.host_state_map[host] = host_state
        host_state.update_from_compute_node(compute)

    return self.host_state_map.itervalues()

def hosts(self, req):
    context = req.environ['nova.context']
    authorize(context)
    nodes = db.compute_node_get_all(context)
    result = {}
    for node in nodes:
        pri_network_mbps = node.get('total_private_network_mbps', 0)
        pub_network_mbps = node.get('total_public_network_mbps', 0)
        pri_network_mbps_used = node.get('private_network_mbps_used', 0)
        pub_network_mbps_used = node.get('public_network_mbps_used', 0)
        result.update({node.hypervisor_hostname: {
            "ecus": self._get_host_ecu(req, node).get('capacity'),
            "ecus_used": self._get_host_ecu(req, node).get('ecus_used'),
            "disk_gb": node.local_gb,
            "local_gb_used": node.local_gb_used,
            "memory_mb": node.memory_mb,
            "memory_mb_used": node.memory_mb_used,
            "public_network_qos_mbps": pub_network_mbps,
            "private_network_qos_mbps": pri_network_mbps,
            "public_qos_used": pub_network_mbps_used,
            "private_qos_used": pri_network_mbps_used,
            "servers_used": node.running_vms,
            "vcpus_used": node.vcpus_used
        }})
    return result

def get_all_host_states(self, context, topic):
    """Returns a dict of all the hosts the HostManager knows about.
    Also, each of the consumable resources in HostState are
    pre-populated and adjusted based on data in the db.

    For example: {'192.168.1.100': HostState(), ...}

    Note: this can be very slow with a lot of instances.
    InstanceType table isn't required since a copy is stored with the
    instance (in case the InstanceType changed since the instance was
    created).
    """
    if topic != 'compute':
        raise NotImplementedError(_(
            "host_manager only implemented for 'compute'"))

    host_state_map = {}

    # Get resource usage across the available compute nodes:
    compute_nodes = db.compute_node_get_all(context)
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            LOG.warn(_("No service for compute ID %s") % compute['id'])
            continue
        host = service['host']
        capabilities = self.service_states.get(host, None)
        host_state = self.host_state_cls(
            host, topic,
            capabilities=capabilities,
            service=dict(service.iteritems()))
        host_state.update_from_compute_node(compute)
        host_state_map[host] = host_state

    return host_state_map

def detail(self, req):
    context = req.environ['nova.context']
    authorize(context)
    return dict(hypervisors=[self._view_hypervisor(hyp, True)
                             for hyp in db.compute_node_get_all(context)])

def get_all_host_states(self, context):
    """Returns a list of HostStates that represents all the hosts
    the HostManager knows about. Also, each of the consumable resources
    in HostState are pre-populated and adjusted based on data in the db.
    """

    # Get resource usage across the available compute nodes:
    compute_nodes = db.compute_node_get_all(context)
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            LOG.warn(_("No service for compute ID %s") % compute['id'])
            continue
        host = service['host']
        node = compute.get('hypervisor_hostname')
        state_key = (host, node)
        capabilities = self.service_states.get(state_key, None)
        host_state = self.host_state_map.get(state_key)
        if host_state:
            host_state.update_capabilities(capabilities,
                                           dict(service.iteritems()))
        else:
            host_state = self.host_state_cls(
                host, node,
                capabilities=capabilities,
                service=dict(service.iteritems()))
            self.host_state_map[state_key] = host_state
        host_state.update_from_compute_node(compute)

    return self.host_state_map.itervalues()

def test_get_all_host_states(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warn')

    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # node 3 host physical disk space is greater than database
    host_manager.LOG.warn("Host has more disk space than database "
                          "expected (3333gb > 3072gb)")
    # Invalid service
    host_manager.LOG.warn("No service for compute ID 5")
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map

    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(host_states_map[state_key].service,
                         compute_node['service'])
    self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb, 512)
    # 511GB
    self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                     524288)
    self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                     1024)
    # 1023GB
    self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                     1048576)
    self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                     3072)
    # 3071GB
    self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                     3145728)
    self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                     8192)
    # 8191GB
    self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                     8388608)

def test_refresh_from_db_delete_host(self):
    self._createInvCache()
    InventoryCacheManager.get_all_compute_inventory().clear()
    compute = []
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(compute)
    im = self.inv_manager
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)

    compute = _create_Compute(compute_id='vmhost1')
    service = compute['service']
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory()['vmhost1'] = \
        ComputeInventory(rm_context)
    vmhost = VmHost()
    vmhost.set_id('vmhost1')
    vmhost.set_name('vmhost1')
    InventoryCacheManager.get_all_compute_inventory()[
        'vmhost1'].update_compute_info(rm_context, vmhost)

    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()

    im._refresh_from_db(None)
    self.mox.VerifyAll()
    self.mox.stubs.UnsetAll()
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertTrue(InventoryCacheManager.get_all_compute_inventory().get(
        'compute1') is None)
    self.mox.UnsetStubs()

def __init__(self):
    self.attestservice = AttestationService()
    self.compute_nodes = {}
    admin = context.get_admin_context()

    # Fetch the compute node list to initialize compute_nodes, so that
    # we don't need to poll the OAT service one by one for each host in
    # the first round that the scheduler invokes us.
    self.compute_nodes = db.compute_node_get_all(admin)

def test_retry_force_nodes(self):
    # Retry info should not get populated when re-scheduling is off.
    self.flags(scheduler_max_attempts=2)
    sched = fakes.FakeFilterScheduler()

    instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
    request_spec = dict(instance_properties=instance_properties)
    filter_properties = dict(force_nodes=['force_node'])

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.ReplayAll()

    sched._schedule(self.context, request_spec,
                    filter_properties=filter_properties)

    # should not have retry info in the populated filter properties:
    self.assertNotIn("retry", filter_properties)

def __init__(self):
    self.utils = host_trust_utils.HostTrustUtils()
    self.compute_nodes = {}
    self.admin = context.get_admin_context()

    # Fetch the compute node list to initialize compute_nodes, so that
    # we don't need to poll the OAT service one by one for each host in
    # the first round that the scheduler invokes us.
    self.compute_nodes = db.compute_node_get_all(self.admin)

def test_get_all_host_states(self):
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warn')

    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # Invalid service
    host_manager.LOG.warn("No service for compute ID 5")
    self.mox.ReplayAll()

    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map

    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(host_states_map[state_key].service,
                         compute_node['service'])
    self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb, 512)
    # 511GB
    self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                     524288)
    self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                     1024)
    # 1023GB
    self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                     1048576)
    self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                     3072)
    # 3071GB
    self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                     3145728)
    self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                     8192)
    # 8191GB
    self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                     8388608)

def test_retry_attempt_one(self):
    # Test retry logic on initial scheduling attempt.
    self.flags(scheduler_max_attempts=2)
    sched = fakes.FakeFilterScheduler()

    instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
    request_spec = dict(instance_properties=instance_properties,
                        instance_type={})
    filter_properties = {}

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.ReplayAll()

    sched._schedule(self.context, request_spec,
                    filter_properties=filter_properties)

    num_attempts = filter_properties['retry']['num_attempts']
    self.assertEqual(1, num_attempts)

def test_refresh_from_db_new(self):
    self._createInvCache()
    self.inv_manager_cls._compute_inventory = {}
    compute = _create_Compute(compute_id='compute1')
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([compute])
    im = self.inv_manager
    self.assertEquals(len(im._compute_inventory), 0)
    self.mox.ReplayAll()
    im._refresh_from_db(None)
    self.mox.VerifyAll()
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    self.assertIn('compute1',
                  InventoryCacheManager.get_all_compute_inventory())
    self.mox.UnsetStubs()

def test_refresh_from_db_for_service_disabled_created(self):
    self._createInvCache()
    self.inv_manager_cls._compute_inventory = {}
    compute1 = _create_Compute(compute_id='vmhost1')
    compute1['service']['created_at'] = timeutils.utcnow() - timedelta(1)
    compute1['service']['updated_at'] = None
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([compute1])
    im = self.inv_manager
    self.assertEquals(len(im._compute_inventory), 0)
    self.mox.ReplayAll()
    im._refresh_from_db(None)
    self.mox.VerifyAll()
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertNotIn('compute1',
                     InventoryCacheManager.get_all_compute_inventory())

def index(self, req):
    '''
    return all quotas usage in platform.
    '''
    context = req.environ['nova.context']
    authorize(context)
    hosts = db.compute_node_get_all(context)
    memory_mb_capacity = 0
    local_gb_capacity = 0
    ecu_capacity = 0
    private_network_qos_capacity = 0
    public_network_qos_capacity = 0
    for host in hosts:
        memory_mb_capacity += host.memory_mb
        local_gb_capacity += host.local_gb
        ecu_capacity += self._get_host_ecu(req, host).get('capacity') or 0
        total_private_network_mbps = host.get(
            'total_private_network_mbps')
        if total_private_network_mbps is not None:
            private_network_qos_capacity += total_private_network_mbps
        total_public_network_mbps = host.get(
            'total_public_network_mbps')
        # Check the public value here, not the private one again,
        # so public capacity accumulates from the right column.
        if total_public_network_mbps is not None:
            public_network_qos_capacity += total_public_network_mbps

    filters = {"deleted": False}
    instances = db.instance_get_all_by_filters(context, filters,
                                               "created_at", "desc")
    self.flavors = db.instance_type_get_all(context)
    usages = dict(
        ecus={"capacity": ecu_capacity, "ecus": []},
        servers={"servers": []},
        vcpus={"vcpus": []},
        local_gb={"capacity": local_gb_capacity, "local_gb": []},
        memory_mb={"capacity": memory_mb_capacity, "memory_mb": []},
        network_qos={
            "network_qos": [],
            "private_capacity": private_network_qos_capacity,
            "public_capacity": public_network_qos_capacity
        }
    )
    for instance in instances:
        for key in usages:
            # Note(hzzhoushaoyu) key in usages should be the same as
            # list key in each item. 'key' in second parameter is not
            # the same hierarchy as 'key' in first parameter in usage.
            self._make_items(req, usages[key], key, instance)
    # update floating IPs
    usages.update(floating_ips=self._get_floating_ips(context))
    return usages

def test_get_all_host_states(self):
    self.flags(reserved_host_memory_mb=512, reserved_host_disk_mb=1024)
    context = 'fake_context'
    topic = 'compute'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warn')
    self.mox.StubOutWithMock(db, 'instance_get_all')

    db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # Invalid service
    host_manager.LOG.warn("No service for compute ID 5")
    db.instance_get_all(
        context,
        columns_to_join=['instance_type']).AndReturn(fakes.INSTANCES)
    self.mox.ReplayAll()

    host_states = self.host_manager.get_all_host_states(context, topic)

    self.assertEqual(len(host_states), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        self.assertEqual(host_states[host].service,
                         compute_node['service'])
    self.assertEqual(host_states['host1'].free_ram_mb, 0)
    # 511GB
    self.assertEqual(host_states['host1'].free_disk_mb, 523264)
    self.assertEqual(host_states['host2'].free_ram_mb, 512)
    # 1023GB
    self.assertEqual(host_states['host2'].free_disk_mb, 1047552)
    self.assertEqual(host_states['host3'].free_ram_mb, 2560)
    # 3071GB
    self.assertEqual(host_states['host3'].free_disk_mb, 3144704)
    self.assertEqual(host_states['host4'].free_ram_mb, 7680)
    # 8191GB
    self.assertEqual(host_states['host4'].free_disk_mb, 8387584)

def test_get_all_host_states(self):
    # Ensure .service is set and we have the values we expect to.
    context = 'fake_context'

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()

    self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map

    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in range(4):
        compute_node = ironic_fakes.COMPUTE_NODES[i]
        host = compute_node['service']['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(compute_node['service'],
                         host_states_map[state_key].service)
    # check we have the values we think we should.
    self.assertEqual(1024,
                     host_states_map[('host1', 'node1uuid')].free_ram_mb)
    self.assertEqual(10240,
                     host_states_map[('host1', 'node1uuid')].free_disk_mb)
    self.assertEqual(2048,
                     host_states_map[('host2', 'node2uuid')].free_ram_mb)
    self.assertEqual(20480,
                     host_states_map[('host2', 'node2uuid')].free_disk_mb)
    self.assertEqual(3072,
                     host_states_map[('host3', 'node3uuid')].free_ram_mb)
    self.assertEqual(30720,
                     host_states_map[('host3', 'node3uuid')].free_disk_mb)
    self.assertEqual(4096,
                     host_states_map[('host4', 'node4uuid')].free_ram_mb)
    self.assertEqual(40960,
                     host_states_map[('host4', 'node4uuid')].free_disk_mb)

def test_run_instance_no_hosts(self):
    def _fake_empty_call_zone_method(*args, **kwargs):
        return []

    sched = fakes.FakeFilterScheduler()
    uuid = 'fake-uuid1'
    fake_context = context.RequestContext('user', 'project')
    instance_properties = {'project_id': 1, 'os_type': 'Linux'}
    request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
                                      'ephemeral_gb': 0},
                    'instance_properties': instance_properties,
                    'instance_uuids': [uuid]}

    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    old_ref, new_ref = db.instance_update_and_get_original(
        fake_context, uuid,
        {'vm_state': vm_states.ERROR,
         'task_state': None}).AndReturn(({}, {}))
    compute_utils.add_instance_fault_from_exc(
        fake_context, new_ref, mox.IsA(exception.NoValidHost),
        mox.IgnoreArg())

    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])

    self.mox.ReplayAll()
    sched.schedule_run_instance(
        fake_context, request_spec, None, None, None, None, {}, False)

def __init__(self):
    self.attestservice = AttestationService()
    self.compute_nodes = {}
    admin = context.get_admin_context()

    # Fetch the compute node list to initialize compute_nodes, so that
    # we don't need to poll the OAT service one by one for each host in
    # the first round that the scheduler invokes us.
    computes = db.compute_node_get_all(admin)
    for compute in computes:
        service = compute['service']
        if not service:
            LOG.warn(_("No service for compute ID %s") % compute['id'])
            continue
        host = service['host']
        self._init_cache_entry(host)

def get_all_host_states(self, context, topic):
    """Returns a dict of all the hosts the HostManager knows about.
    Also, each of the consumable resources in HostState are
    pre-populated and adjusted based on data in the db.

    For example: {'192.168.1.100': HostState(), ...}

    Note: this can be very slow with a lot of instances.
    InstanceType table isn't required since a copy is stored with the
    instance (in case the InstanceType changed since the instance was
    created).
    """
    if topic != 'compute':
        raise NotImplementedError(
            _("host_manager only implemented for 'compute'"))

    host_state_map = {}

    # Make a compute node dict with the bare essential metrics.
    compute_nodes = db.compute_node_get_all(context)
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            LOG.warn(_("No service for compute ID %s") % compute['id'])
            continue
        host = service['host']
        capabilities = self.service_states.get(host, None)
        host_state = self.host_state_cls(
            host, topic,
            capabilities=capabilities,
            service=dict(service.iteritems()))
        host_state.update_from_compute_node(compute)
        host_state_map[host] = host_state

    # "Consume" resources from the host the instance resides on.
    instances = db.instance_get_all(context,
                                    columns_to_join=['instance_type'])
    for instance in instances:
        host = instance['host']
        if not host:
            continue
        host_state = host_state_map.get(host, None)
        if not host_state:
            continue
        host_state.consume_from_instance(instance)

    return host_state_map

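# Illustrative note, not from the original sources: this variant rebuilds
# host_state_map locally and additionally charges every non-deleted
# instance against its host. Assuming consume_from_instance() subtracts
# the instance's footprint, the reported free memory is roughly
#
#     free_ram_mb(host) ~= free ram reported by the compute node row
#                          - sum(memory_mb of instances on that host)
#
# with any reserved_host_memory_mb already taken out by
# update_from_compute_node(), consistent with the reserved_host_* test
# earlier in this collection.
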
def test_compute_node_get_all(self):
    date_fields = set(['created_at', 'updated_at',
                       'deleted_at', 'deleted'])
    for no_date_fields in [False, True]:
        nodes = db.compute_node_get_all(self.ctxt, no_date_fields)
        self.assertEqual(1, len(nodes))
        node = nodes[0]
        self._assertEqualObjects(self.compute_node_dict, node,
                                 ignored_keys=self._ignored_keys +
                                 ['stats', 'service'])
        node_fields = set(node.keys())
        if no_date_fields:
            self.assertFalse(date_fields & node_fields)
        else:
            self.assertTrue(date_fields <= node_fields)
        new_stats = jsonutils.loads(node['stats'])
        self.assertEqual(self.stats, new_stats)

def force_update_host_states(self, context, consumed_hosts):
    """Force update host states."""
    if not consumed_hosts:
        return

    consumed_hosts_map = {}
    for x in consumed_hosts:
        consumed_hosts_map[(x.obj.host, x.obj.nodename)] = x.obj
    compute_nodes = db.compute_node_get_all(context)
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            LOG.warning(_LW("No service for compute ID %s"),
                        compute['id'])
            continue
        state_key = (service['host'],
                     compute.get('hypervisor_hostname'))
        if state_key in consumed_hosts_map:
            consumed_hosts_map.get(state_key).update_from_compute_node(
                compute, force_update=True)