def _createCache(self):
    """Prime the inventory-cache DB mocks with two hosts, two VMs,
    one storage pool and one subnet.

    Stubs the four ``api`` bulk-read calls so a subsequent cache
    refresh sees this fixed topology.  Expectations are recorded here;
    the caller is responsible for ``ReplayAll``.
    """
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    vmhost = VmHost()
    vmhost.set_id('vmhost1')
    vmhost1 = VmHost()
    vmhost1.set_id('vmhost2')
    vm = Vm()
    vm.set_id('vm1')
    # VM_POWER_STATES[1] is the "running"-like state used throughout
    # these tests.
    vm.set_powerState(Constants.VM_POWER_STATES[1])
    vm.set_vmHostId('vmhost1')
    vm1 = Vm()
    vm1.set_id('vm2')
    vm1.set_powerState(Constants.VM_POWER_STATES[1])
    vm1.set_vmHostId('vmhost2')
    # NOTE(review): 'vm2' is listed under vmhost1's VM ids although
    # vm1.vmHostId is 'vmhost2' — looks intentional for this fixture,
    # but confirm against the consuming test.
    vmhost.set_virtualMachineIds(['vm1', 'vm2'])
    stPool = StorageVolume()
    stPool.set_id('stpool1')
    subNet = Subnet()
    subNet.set_id('net1')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([vmhost, vmhost1])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([vm, vm1])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([stPool])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([subNet])
def test_portGroup_added_event(self):
    """A port group present only in the new host model should produce a
    PORTGROUP_ADDED notification as the second emitted event (the first
    being the enclosing switch-added event).
    """
    old_host = VmHost()
    old_host.id = self.libvirtNetwork.compute_id
    new_host = VmHost()
    new_host.id = self.libvirtNetwork.compute_id
    switch = VirtualSwitch()
    switch.set_id("11")
    switch.set_name("vs1")
    group = PortGroup()
    group.set_id("PortGroup_" + switch.get_id())
    group.set_name(switch.get_name())
    group.set_virtualSwitchId(switch.get_id())
    switch.set_portGroups([group])
    new_host.set_virtualSwitches([switch])
    new_host.set_portGroups([group])
    self.libvirtNetwork._processNetworkEvents(old_host, new_host)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
    notification = test_notifier.NOTIFICATIONS[1]
    self.assertEquals(notification['priority'], notifier_api.INFO)
    expected_meta = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_PORTGROUP_ADDED)
    self.assertEquals(notification['event_type'],
                      expected_meta.get_event_fully_qal_name())
    body = notification['payload']
    self.assertEquals(body['entity_type'], 'PortGroup')
    self.assertEquals(body['entity_id'], group.get_id())
def test_host_disconnected_event(self):
    """A host whose libvirt connection is gone must be marked
    DISCONNECTED and emit exactly one CRITICAL HOST_DISCONNECTED
    notification.

    The real libvirt module is swapped in for the duration of the test
    and restored in ``finally``.
    """
    self.__mock_service_get_all_by_topic()
    # Swap in the real libvirt binding; restored in the finally block.
    backedUp_libvirt = connection.libvirt
    connection.libvirt = libvirt
    try:
        compute_id = '1'
        virtConnection = LibvirtConnection(False)
        vmHost = VmHost()
        vmHost.id = compute_id
        vmHost.set_virtualMachineIds([])
        InventoryCacheManager.update_object_in_cache(compute_id, vmHost)
        # virtConnection.setUuid('34353438-3934-434e-3738-313630323543'
        # )
        # A None wrapped connection is what simulates the disconnect.
        virtConnection._wrapped_conn = None
        virtConnection.compute_rmcontext = \
            ComputeRMContext(rmType='KVM',
                             rmIpAddress='10.10.155.165',
                             rmUserName='******',
                             rmPassword='******')
        cachedHost = VmHost()
        cachedHost.id = compute_id
        cachedHost.connectionState = Constants.VMHOST_CONNECTED
        # Record mox expectations (order matters for mox replay).
        self.mox.StubOutWithMock(InventoryCacheManager,
                                 'get_object_from_cache')
        self.mox.StubOutWithMock(InventoryCacheManager,
                                 'get_compute_conn_driver')
        InventoryCacheManager.get_compute_conn_driver(
            self.libvirtVmHost.compute_id,
            Constants.VmHost).AndReturn(fake.get_connection())
        InventoryCacheManager.get_object_from_cache(
            compute_id, Constants.VmHost).AndReturn(cachedHost)
        self.mox.StubOutWithMock(api, 'vm_host_save')
        api.vm_host_save(
            mox.IgnoreArg(),
            mox.IgnoreArg()).MultipleTimes().AndReturn(None)
        self.mox.ReplayAll()
        libvirtEvents = LibvirtEvents()
        libvirtVmHost = LibvirtVmHost(virtConnection._wrapped_conn,
                                      compute_id, libvirtEvents)
        libvirtVmHost.processUpdates()
        self.assertEquals(libvirtVmHost.vmHost.get_connectionState(),
                          Constants.VMHOST_DISCONNECTED)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEquals(msg['priority'], notifier_api.CRITICAL)
        event_type = \
            event_metadata.get_EventMetaData(
                event_metadata.EVENT_TYPE_HOST_DISCONNECTED)
        self.assertEquals(msg['event_type'],
                          event_type.get_event_fully_qal_name())
        payload = msg['payload']
        self.assertEquals(payload['entity_type'], 'VmHost')
        self.assertEquals(payload['entity_id'],
                          libvirtVmHost.compute_id)
    finally:
        connection.libvirt = backedUp_libvirt
def testProcessUpdates_compute_stopped_exception(self):
    """processUpdates must swallow an exception raised while notifying
    a host-update for a stopped compute service and still return None.
    """
    vmHost = VmHost()
    vmHost.set_id('1')
    vmHost.set_connectionState(Constants.VMHOST_CONNECTED)
    InventoryCacheManager.update_object_in_cache('1', vmHost)
    self.mock.StubOutWithMock(api, 'vm_host_save')
    api.vm_host_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVmHost.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    # One compute node whose service heartbeat will be reported dead.
    fake_computes = [{'id': '1', 'service': {'created_at': 'created',
                                             'updated_at': 'updated'}}]
    self.mock.StubOutWithMock(novadb, 'compute_node_get_all')
    novadb.compute_node_get_all(mox.IgnoreArg()).AndReturn(fake_computes)
    self.mock.StubOutWithMock(hnm_utils, 'is_service_alive')
    hnm_utils.is_service_alive(
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False)
    # The notification itself blows up — processUpdates must not
    # propagate this.
    self.mock.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(
        mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(Exception())
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVmHost.processUpdates(), None)
    self.mock.stubs.UnsetAll()
def test_network_enabled_event(self):
    """Flipping a virtual switch from Inactive to Active must emit one
    INFO NETWORK_ENABLED notification carrying the switch details.
    """
    before = VmHost()
    before.id = self.libvirtNetwork.compute_id
    switch = VirtualSwitch()
    switch.set_id("11")
    switch.set_name("vs1")
    switch.set_connectionState("Inactive")
    before.set_virtualSwitches([switch])
    # The "new" model is the same host with the switch switched on.
    after = copy.deepcopy(before)
    after.get_virtualSwitches()[0].set_connectionState("Active")
    self.libvirtNetwork._processNetworkEvents(before, after)
    self.assertEquals(
        after.get_virtualSwitches()[0].get_connectionState(),
        Constants.VIRSWITCH_STATE_ACTIVE)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    notification = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(notification['priority'], notifier_api.INFO)
    meta = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_NETWORK_ENABLED)
    self.assertEquals(notification['event_type'],
                      meta.get_event_fully_qal_name())
    body = notification['payload']
    self.assertEquals(body['entity_type'], 'VirtualSwitch')
    self.assertEquals(body['entity_id'], switch.get_id())
    self.assertEquals(body["state"], 'Active')
def setUp(self):
    """Build a LibvirtStorageVolume fixture backed by a live
    ``qemu:///system`` connection and an empty cached host, and stub
    the nova service lookup so notifications can flow.
    """
    super(StorageVolumeEventsTest, self).setUp()
    self.connection = LibvirtConnection(False)
    vmHost = VmHost()
    vmHost.set_storageVolumeIds([])
    InventoryCacheManager.update_object_in_cache('1', vmHost)
    self.connection._wrapped_conn = libvirt.open('qemu:///system')
    self.LibvirtStorageVolume = \
        LibvirtStorageVolume(self.connection._wrapped_conn, '1')
    self.LibvirtStorageVolume.vmHost = vmHost
    # Zero the old/current size counters so size-change events start
    # from a known baseline.
    self.LibvirtStorageVolume.cur_total_storage_size = 0
    self.LibvirtStorageVolume.curr_storage_free = 0
    self.LibvirtStorageVolume.old_total_storage_size = 0
    self.LibvirtStorageVolume.old_storage_free = 0
    self.LibvirtStorageVolume.vmHost.set_id('1')
    self.connection.compute_rmcontext = \
        ComputeRMContext(rmType='KVM',
                         rmIpAddress='10.10.155.165',
                         rmUserName='******',
                         rmPassword='******')
    # Route notifications to the in-memory test notifier and clear any
    # leftovers from earlier tests.
    self.flags(
        healthnmon_notification_drivers=['nova.notifier.test_notifier'])
    test_notifier.NOTIFICATIONS = []
    self.mox.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
def test_vm_host_get_all_for_sv(self):
    """A storage volume mounted on a host must appear in the host's
    storageVolumeIds, and disappear again once the volume is deleted.
    """
    host_id = 'VH1'
    host = VmHost()
    host.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), host)
    mount = HostMountPoint()
    mount.set_vmHostId(host_id)
    mount.set_path('/path')
    sv = StorageVolume()
    sv.set_id('SV11')
    sv.add_mountPoints(mount)
    healthnmon_db_api.storage_volume_save(get_admin_context(), sv)
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(hosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(
        len(hosts) > 0,
        'Host get by id returned invalid number of list')
    self.assertTrue(hosts[0].id == host_id)
    sv_ids = hosts[0].get_storageVolumeIds()
    self.assertTrue(sv_ids is not None)
    self.assertTrue(len(sv_ids) == 1)
    self.assertTrue(sv.get_id() in sv_ids)
    # Remove the volume and confirm the association is gone.
    healthnmon_db_api.storage_volume_delete_by_ids(
        get_admin_context(), [sv.get_id()])
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertTrue(hosts[0].id == host_id)
    sv_ids = hosts[0].get_storageVolumeIds()
    self.assertTrue((sv_ids is None) or (len(sv_ids) == 0))
def test_vm_host_get_by_id(self):
    """vm_host_get_by_ids must return the saved host when a VM and a
    mounted storage volume reference it.
    """
    host_id = 'VH1'
    host = VmHost()
    host.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), host)
    guest = Vm()
    guest.id = 'VM11'
    guest.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), guest)
    mount = HostMountPoint()
    mount.set_vmHostId(host_id)
    mount.set_path('/path')
    sv = StorageVolume()
    sv.set_id('SV11')
    sv.add_mountPoints(mount)
    healthnmon_db_api.storage_volume_save(get_admin_context(), sv)
    hosts = healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                 [host_id])
    self.assertFalse(hosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(
        len(hosts) > 0,
        'Host get by id returned invalid number of list')
    self.assertTrue(hosts[0].id == host_id)
def test_diff_test_diff_resourcemodel_virtualSwitch_withdelete(self):
    """Removing a virtual switch from the new model must surface it
    under update -> virtualSwitches -> delete in the computed diff.
    """
    old_host = VmHost()
    old_host.id = '1'
    switch = VirtualSwitch()
    switch.set_id("11")
    switch.set_name("vs1")
    old_host.set_virtualSwitches([switch])
    # The new model is identical except the switch has been dropped.
    new_host = copy.deepcopy(old_host)
    new_host.get_virtualSwitches().pop()
    diff_res = ResourceModelDiff(old_host, new_host).diff_resourcemodel()
    self.assertTrue(len(diff_res) > 0)
    self.assertTrue(self.update in diff_res)
    virtualSwitches = 'virtualSwitches'
    self.assertTrue(virtualSwitches in diff_res[self.update])
    switch_diff = diff_res[self.update][virtualSwitches]
    self.assertTrue(self.delete in switch_diff)
    key = switch_diff[self.delete].keys()[0]
    self.assertTrue(
        isinstance(switch_diff[self.delete][key], VirtualSwitch))
    removed_switch = switch_diff[self.delete][key]
    self.assertEquals(removed_switch.id, '11')
    self.assertEquals(removed_switch.name, 'vs1')
def test_vm_host_get_all_for_vm(self):
    """A VM placed on a host must show up in the host's
    virtualMachineIds, and vanish after the VM is deleted.
    """
    host_id = 'VH1'
    host = VmHost()
    host.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), host)
    guest = Vm()
    guest.id = 'VM11'
    guest.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), guest)
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(hosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(
        len(hosts) > 0,
        'Host get by id returned invalid number of list')
    self.assertTrue(hosts[0].id == host_id)
    vm_ids = hosts[0].get_virtualMachineIds()
    self.assertTrue(vm_ids is not None)
    self.assertTrue(len(vm_ids) == 1)
    self.assertTrue(guest.id in vm_ids)
    # Delete the VM and verify the host no longer references it.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [guest.id])
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertTrue(hosts[0].id == host_id)
    vm_ids = hosts[0].get_virtualMachineIds()
    self.assertTrue((vm_ids is None) or (len(vm_ids) == 0))
def __create_vm_host(self, **kwargs):
    """Create, persist and return a VmHost.

    Any keyword arguments are set as attributes on the new host before
    it is saved through ``healthnmon_db_api.vm_host_save``.

    :param kwargs: attribute name/value pairs to apply to the host
    :returns: the saved ``VmHost`` instance
    """
    vmhost = VmHost()
    # ``**kwargs`` is always a dict (possibly empty), never None, so
    # the former ``if kwargs is not None`` guard was dead code.
    for field, value in kwargs.items():
        setattr(vmhost, field, value)
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
    return vmhost
def test_host_removed_event(self):
    """A host present in the cache but absent from nova's compute list
    must be removed from the inventory and emit one INFO HOST_REMOVED
    notification.
    """
    self.__mock_service_get_all_by_topic()
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    # The DB still knows the host...
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    # ...but nova reports no compute nodes, so the host is stale.
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager,
                             'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()
    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = \
        rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'
                                   ], rmIpAddress=compute_service['host'],
                                   rmUserName='******',
                                   rmPassword='******')
    # Seed the in-memory inventory with exactly this one host.
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory(
    )['compute1'] = ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    # The stale host must be purged from the inventory.
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = \
        event_metadata.get_EventMetaData(
            event_metadata.EVENT_TYPE_HOST_REMOVED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'VmHost')
    self.assertEquals(payload['entity_id'], deleted_host.id)
def test_vm_host_save_update_with_new_vSwitch(self):
    """Saving a host twice — the second time with an extra virtual
    switch and port group — must persist the additions.
    """
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    vSwitch = VirtualSwitch()
    vSwitch.set_id('vSwitch-01')
    vSwitch.set_name('vSwitch-01')
    vSwitch.set_resourceManagerId('rmId')
    vSwitch.set_switchType('vSwitch')
    cost1 = Cost()
    cost1.set_value(100)
    cost1.set_units('USD')
    vSwitch.set_cost(cost1)
    portGroup = PortGroup()
    portGroup.set_id('pg-01')
    portGroup.set_name('pg-01')
    portGroup.set_resourceManagerId('rmId')
    portGroup.set_type('portgroup_type')
    portGroup.set_cost(cost1)
    vSwitch.add_portGroups(portGroup)
    vmhost.add_virtualSwitches(vSwitch)
    vmhost.add_portGroups(portGroup)
    # First save: one switch, one port group.
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vSwitch_new = VirtualSwitch()
    vSwitch_new.set_id('vSwitch-02')
    vSwitch_new.set_name('vSwitch-02')
    vSwitch_new.set_resourceManagerId('rmId')
    vSwitch_new.set_switchType('vSwitch')
    portGroup_new = PortGroup()
    portGroup_new.set_id('pg-02')
    portGroup_new.set_name('pg-02')
    portGroup_new.set_resourceManagerId('rmId')
    portGroup_new.set_type('portgroup_type')
    # NOTE(review): the new port group is attached to the *old* switch
    # (vSwitch), not vSwitch_new — possibly intended to be
    # vSwitch_new.add_portGroups(portGroup_new); confirm before changing.
    vSwitch.add_portGroups(portGroup_new)
    vmhost.add_virtualSwitches(vSwitch_new)
    vmhost.add_portGroups(portGroup_new)
    # Second save: update path with the additional entities.
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vmhosts = \
        healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                             [host_id])
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(
        len(vmhosts) > 0,
        'Host get by id returned invalid number of list')
    self.assertTrue(
        len(vmhosts[0].get_virtualSwitches()) > 0,
        'Host get by virtual switch returned invalid number of list')
    self.assertTrue(
        len(vmhosts[0].get_portGroups()) > 0,
        'Host get by port group returned invalid number of list')
    self.assertTrue(vmhosts[0].id == host_id)
def get_host_list(self):
    """Return two fully-populated fake hosts, each carrying two VM ids
    and two storage-volume ids.
    """
    hosts = []
    fixtures = (
        ('host-01', ('vm-01', 'vm-02'), ('storage-01', 'storage-02')),
        ('host-02', ('vm-03', 'vm-04'), ('storage-03', 'storage-04')),
    )
    for name, vm_ids, storage_ids in fixtures:
        host = VmHost()
        host.set_id(name)
        host.set_name(name)
        for vm_id in vm_ids:
            host.add_virtualMachineIds(vm_id)
        for storage_id in storage_ids:
            host.add_storageVolumeIds(storage_id)
        hosts.append(host)
    return hosts
def test_network_added_event(self):
    """A switch present only in the new host model must emit one INFO
    NETWORK_ADDED notification referencing the switch.
    """
    old_host = VmHost()
    old_host.id = self.libvirtNetwork.compute_id
    new_host = VmHost()
    new_host.id = self.libvirtNetwork.compute_id
    switch = VirtualSwitch()
    switch.set_id("11")
    switch.set_name("vs1")
    new_host.set_virtualSwitches([switch])
    self.libvirtNetwork._processNetworkEvents(old_host, new_host)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    notification = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(notification['priority'], notifier_api.INFO)
    meta = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_NETWORK_ADDED)
    self.assertEquals(notification['event_type'],
                      meta.get_event_fully_qal_name())
    body = notification['payload']
    self.assertEquals(body['entity_type'], 'VirtualSwitch')
    self.assertEquals(body['entity_id'], switch.get_id())
def get_limited_list(self, num):
    """Return ``num`` fake hosts, host-1 .. host-num.

    NOTE(review): each host adds the same vm id and the same storage id
    twice — this mirrors the original fixture and may be deliberate
    duplicate-handling input; confirm before deduplicating.
    """
    hosts = []
    for i in range(1, num + 1):
        suffix = str(i)
        host = VmHost()
        host.set_id('host-' + suffix)
        host.set_name('host-' + suffix)
        for _ in (0, 1):
            host.add_virtualMachineIds('vm-' + suffix)
        for _ in (0, 1):
            host.add_storageVolumeIds('storage-' + suffix)
        hosts.append(host)
    return hosts
def testVmHostPayloadGenerator(self):
    """generate_payload for a HOST_ADDED event must copy the entity
    type, host name and the host's IP address into the payload.
    """
    meta = \
        event_metadata.get_EventMetaData(
            event_metadata.EVENT_TYPE_HOST_ADDED)
    host = VmHost()
    host.name = 'TestVmHost'
    ip_profile = IpProfile()
    ip_profile.ipAddress = '10.10.10.1'
    host.add_ipAddresses(ip_profile)
    body = payload_generator.generate_payload(meta, host)
    self.assertEquals(body['entity_type'], host.__class__.__name__)
    self.assertEquals(body['name'], host.name)
    self.assertEquals(body['ipAddresses'], ip_profile.ipAddress)
def test_filtered_ordered_query_changessince_invalid_value(self):
    """An unparseable 'changes-since' filter value must be ignored and
    the query must still return every stored host.

    Uses ``assertTrue`` instead of the deprecated ``assert_`` alias
    (removed from unittest in Python 3.12).
    """
    # Persist a single host so the expected result set is known.
    vmhost = VmHost()
    vmhost.id = 'VH1'
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
    # 'changes-since' normally carries a timestamp; feed garbage.
    filters = {'changes-since': 'invalid-value'}
    vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
        self.admin_context, filters, None, None)
    self.assertTrue(vmhosts is not None)
    self.assertTrue(len(vmhosts) == 1)
    self.assertTrue(vmhosts[0] is not None)
    self.assertTrue(vmhosts[0].id == vmhost.id)
def test_filtered_ordered_query_sort_no_field(self):
    """Sorting on a non-existent field must be ignored and the query
    must still return every stored host.

    Uses ``assertTrue`` instead of the deprecated ``assert_`` alias
    (removed from unittest in Python 3.12).
    """
    # Persist a single host so the expected result set is known.
    vmhost = VmHost()
    vmhost.id = 'VH1'
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
    # Invalid sort key; ordering request should be dropped, not raised.
    vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
        self.admin_context, None,
        'invalidSortField', DbConstants.ORDER_ASC)
    self.assertTrue(vmhosts is not None)
    self.assertTrue(len(vmhosts) == 1)
    self.assertTrue(vmhosts[0] is not None)
    self.assertTrue(vmhosts[0].id == vmhost.id)
def setUp(self):
    """Build a LibvirtStorageVolume fixture backed by a live
    ``qemu:///system`` connection, with an empty cached host and a
    fresh mox instance.
    """
    self.connection = LibvirtConnection(False)
    vmHost = VmHost()
    vmHost.set_storageVolumeIds([])
    InventoryCacheManager.update_object_in_cache('1', vmHost)
    self.connection._wrapped_conn = libvirt.open('qemu:///system')
    self.LibvirtStorageVolume = \
        LibvirtStorageVolume(self.connection._wrapped_conn, '1')
    self.connection.compute_rmcontext = \
        ComputeRMContext(rmType='KVM',
                         rmIpAddress='10.10.155.165',
                         rmUserName='******',
                         rmPassword='******')
    self.mock = mox.Mox()
def test_host_removed_event_none_host(self):
    """Host removal must still work (and emit one notification) when
    the cached VmHost entry for the deleted host is None.
    """
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    # DB still returns the host; nova reports no compute nodes.
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager,
                             'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()
    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = \
        rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'
                                   ], rmIpAddress=compute_service['host'],
                                   rmUserName='******',
                                   rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory(
    )['compute1'] = ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    # Deliberately null out the cached host object for this id.
    InventoryCacheManager.get_inventory_cache()[Constants.VmHost][
        deleted_host.get_id()] = None
    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
def test_update_object_in_cache(self):
    """Only VmHost instances should be retrievable from the VmHost
    cache slot; an IpAddress stored under another uuid must not be.
    """
    ip_obj = IpAddress()
    host_obj = VmHost()
    InventoryCacheManager.update_object_in_cache('uuid', ip_obj)
    InventoryCacheManager.update_object_in_cache('uuid1', host_obj)
    self.mox.ReplayAll()
    # Give the asynchronous cache update a moment to land.
    eventlet.sleep(2)
    self.mox.VerifyAll()
    missing = InventoryCacheManager.get_object_from_cache(
        'uuid', Constants.VmHost)
    present = InventoryCacheManager.get_object_from_cache(
        'uuid1', Constants.VmHost)
    self.assertTrue(missing is None)
    self.assertTrue(present is not None)
    self.mox.UnsetStubs()
def _createCache(self):
    """Stub the inventory DB reads with a minimal one-of-each cache:
    one host, one VM, one storage pool and one subnet.

    Expectations are recorded only; the caller drives ``ReplayAll``.
    """
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    host = VmHost()
    host.set_id('vmhost1')
    guest = Vm()
    guest.set_id('vm1')
    pool = StorageVolume()
    pool.set_id('stpool1')
    net = Subnet()
    net.set_id('bridge0')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([guest])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([pool])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([net])
def test_refresh_from_db_delete_host(self):
    """_refresh_from_db must drop an inventory entry whose compute node
    no longer exists in the nova DB and fire a host-update notify.
    """
    self._createInvCache()
    InventoryCacheManager.get_all_compute_inventory().clear()
    # Nova reports no compute nodes at all.
    compute = []
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(compute)
    im = self.inv_manager
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    # Seed the inventory with a single host built from a fake compute.
    compute = _create_Compute(compute_id='vmhost1')
    service = compute['service']
    rm_context = \
        rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'
                                   ], rmIpAddress=service['host'],
                                   rmUserName='******',
                                   rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory()['vmhost1'] = \
        ComputeInventory(rm_context)
    vmhost = VmHost()
    vmhost.set_id('vmhost1')
    vmhost.set_name('vmhost1')
    InventoryCacheManager.get_all_compute_inventory(
    )['vmhost1'].update_compute_info(rm_context, vmhost)
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    im._refresh_from_db(None)
    self.mox.VerifyAll()
    self.mox.stubs.UnsetAll()
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertTrue(InventoryCacheManager.get_all_compute_inventory().get(
        'compute1') is None)
    self.mox.UnsetStubs()
def setUp(self):
    """Build a LibvirtVM fixture backed by a live ``qemu:///system``
    connection, an empty cached host and the log notifier driver.
    """
    self.connection = LibvirtConnection(False)
    self.vmHost = VmHost()
    self.vmHost.set_virtualMachineIds([])
    InventoryCacheManager.update_object_in_cache('1', self.vmHost)
    #self.connection.setUuid('34353438-3934-434e-3738-313630323543')
    self.connection._wrapped_conn = libvirt.open('qemu:///system')
    self.libvirtVM = LibvirtVM(self.connection._wrapped_conn, '1')
    self.libvirtVM.vmHost.set_id('1')
    self.connection.compute_rmcontext = \
        ComputeRMContext(rmType='QEMU',
                         rmIpAddress='10.10.155.165',
                         rmUserName='******',
                         rmPassword='******')
    self.mock = mox.Mox()
    # Route notifications to the plain log notifier for these tests.
    cfg.CONF.set_override('healthnmon_notification_drivers',
                          ['healthnmon.notifier.log_notifier'])
def createInvCache(self, vmrunning, hostconnection='Connected'):
    """Populate the inventory cache with one host and one VM.

    :param vmrunning: True puts the VM in power state
        ``VM_POWER_STATES[1]``, False in ``VM_POWER_STATES[0]``
    :param hostconnection: connection state recorded on the host
    """
    host = VmHost()
    host.set_id(self.vmhost_id)
    host.set_connectionState(hostconnection)
    guest = Vm()
    guest.set_id(self.vm_id)
    guest.set_powerState(
        Constants.VM_POWER_STATES[1 if vmrunning else 0])
    guest.set_vmHostId(self.vmhost_id)
    host.set_virtualMachineIds([self.vm_id])
    # Fixed capacity figures used by the utilisation tests.
    host.set_processorSpeedMhz(2100)
    host.set_processorCoresCount(4)
    host.set_processorCount('2')
    host.set_memorySize(2097152)
    host.set_memoryConsumed(2097152)
    InventoryCacheManager.update_object_in_cache(self.vmhost_id, host)
    InventoryCacheManager.update_object_in_cache(self.vm_id, guest)
def setUp(self):
    """Build a LibvirtNetwork fixture whose cached host already owns
    one NAT virtual switch, backed by a live ``qemu:///system``
    connection and the log notifier driver.
    """
    self.connection = LibvirtConnection(False)
    vmHost = VmHost()
    vSwitch = VirtualSwitch()
    vSwitch.set_id('52:54:00:34:14:AE')
    vSwitch.set_name('default')
    vSwitch.set_switchType('nat')
    vmHost.set_virtualSwitches([vSwitch])
    InventoryCacheManager.update_object_in_cache('1', vmHost)
    #self.connection.setUuid('34353438-3934-434e-3738-313630323543')
    self.connection._wrapped_conn = libvirt.open('qemu:///system')
    self.connection.compute_rmcontext = \
        ComputeRMContext(rmType='KVM',
                         rmIpAddress='10.10.155.165',
                         rmUserName='******',
                         rmPassword='******')
    self.LibvirtNetwork = LibvirtNetwork(self.connection, '1')
    self.mock = mox.Mox()
    # Route notifications to the plain log notifier for these tests.
    cfg.CONF.set_override('healthnmon_notification_drivers',
                          ['healthnmon.notifier.log_notifier'])
def test_vm_host_delete_none(self):
    """vm_host_delete_by_ids(None) must be a no-op that leaves the
    previously saved host untouched.
    """
    host = VmHost()
    host.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), host)
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(hosts is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(hosts) == 1,
        'vm_host_get_all does not returned expected number of hosts')
    # Delete with a None id list — nothing should change.
    healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(), None)
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(hosts is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(hosts) == 1,
        'vm_host_get_all does not returned expected number of hosts')
def test_vm_host_save_none(self):
    """vm_host_save(None) must be a no-op that leaves the previously
    saved host untouched.
    """
    host = VmHost()
    host.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), host)
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(hosts is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(hosts) == 1,
        'vm_host_get_all does not returned expected number of hosts')
    # Save a None object — nothing should change.
    healthnmon_db_api.vm_host_save(get_admin_context(), None)
    hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(hosts is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(hosts) == 1,
        'vm_host_get_all does not returned expected number of hosts')
def setUp(self):
    """Build a LibvirtInventoryMonitor fixture whose connection attempt
    is stubbed to raise ``libvirtError``.
    """
    self.mock = mox.Mox()
    self.connection = LibvirtConnection(False)
    self.connection._wrapped_conn = libvirt.open('qemu:///system')
    vmHost = VmHost()
    InventoryCacheManager.update_object_in_cache('1', vmHost)
    self.connection.compute_rmcontext = \
        ComputeRMContext(rmType='fake',
                         rmIpAddress='10.10.155.165',
                         rmUserName='******',
                         rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory()['1'] = \
        ComputeInventory(self.connection.compute_rmcontext)
    # Recorded expectation: any _connect call raises.
    # NOTE(review): AndRaise is given the libvirtError *class*, not an
    # instance — presumably mox raises the class as-is; confirm the
    # intended exception construction.
    self.mock.StubOutWithMock(LibvirtConnection, '_connect')
    self.connection._connect(mox.IgnoreArg(),
                             mox.IgnoreArg()).AndRaise(libvirt.libvirtError)
    self.mock.ReplayAll()
    self.inventoryMonitor = LibvirtInventoryMonitor()
    # self.libvirtVmHost = LibvirtVmHost(self.connection, '1')
    cfg.CONF.set_override('healthnmon_notification_drivers',
                          ['healthnmon.notifier.log_notifier'])
    self.libvirtInventoryMonitor = LibvirtInventoryMonitor()