def test_update_inventory(self):
    """Verify update_inventory drives host, VM and storage-volume saves."""
    self.mox.StubOutWithMock(libvirt, 'openReadOnly')
    libvirt.openReadOnly(mox.IgnoreArg()).AndReturn(self.fakeConn)
    # Stub out all persistence calls; this test only checks the flow.
    for save_api in ('vm_save', 'vm_host_save', 'storage_volume_save'):
        self.mox.StubOutWithMock(api, save_api)
    api.storage_volume_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    api.vm_host_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    api.vm_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.ReplayAll()
    conn = connection.get_connection(True)
    compute_rmcontext = ComputeRMContext(rmType='QEMU',
                                         rmIpAddress='10.10.155.165',
                                         rmUserName='******',
                                         rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory()['1'] = \
        ComputeInventory(compute_rmcontext)
    conn.init_rmcontext(compute_rmcontext)
    conn._wrapped_conn = self.fakeConn
    conn.update_inventory('1')
def test_vm_reconfigured_event(self):
    """A VM whose name changed should emit a VM_RECONFIGURED notification."""
    domainObj, cachedVm = self._mapLibvirtvmToVm()
    cachedVm.name = 'OldName'  # differs from the domain's current name
    self.mox.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager, 'get_object_from_cache')
    InventoryCacheManager.get_object_from_cache(
        domainObj.UUIDString(), Constants.Vm).AndReturn(cachedVm)
    self.mox.ReplayAll()
    self.libvirtVM._processVm(domainObj)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_VM_RECONFIGURED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'Vm')
    self.assertEquals(payload['entity_id'], domainObj.UUIDString())
def test_process_updates_for_updated_VM(self):
    """Updated domain attributes should be reflected in the cached VM."""
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(api, 'vm_delete_by_ids')
    api.vm_delete_by_ids(mox.IgnoreArg(),
                         mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    self.mock.ReplayAll()
    domainObj = libvirt.virDomain()
    self.assertEquals(
        self.libvirtVM.process_updates_for_updated_VM(domainObj), None)
    vm = InventoryCacheManager.get_object_from_cache(
        "25f04dd3-e924-02b2-9eac-876e3c943262", Constants.Vm)
    # self.assertEquals("TestVirtMgrVM7", vm.get_name())
    self.assertEquals("1048576", str(vm.get_memorySize()))
    self.assertEquals("hd", str(vm.get_bootOrder()).strip())
    self.mock.stubs.UnsetAll()
def test_update_inventory(self):
    """Verify update_inventory drives host, VM and storage-volume saves."""
    self.mox.StubOutWithMock(libvirt, 'openReadOnly')
    libvirt.openReadOnly(mox.IgnoreArg()).AndReturn(self.fakeConn)
    # Stub out all persistence calls; this test only checks the flow.
    for save_api in ('vm_save', 'vm_host_save', 'storage_volume_save'):
        self.mox.StubOutWithMock(api, save_api)
    api.storage_volume_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    api.vm_host_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    api.vm_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.ReplayAll()
    conn = connection.get_connection(True)
    compute_rmcontext = ComputeRMContext(rmType='QEMU',
                                         rmIpAddress='10.10.155.165',
                                         rmUserName='******',
                                         rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory()['1'] = \
        ComputeInventory(compute_rmcontext)
    conn.init_rmcontext(compute_rmcontext)
    conn._wrapped_conn = self.fakeConn
    conn.update_inventory('1')
def test_vm_save_update(self):
    """Update an existing VM in the db and verify global settings persist."""
    vm = Vm()
    vm.id = 'VM1-id'
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    # Second save on the same id performs an update, not an insert.
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id(vm.id)
    vmGlobalSettings.set_autoStartAction('autoStartAction')
    vmGlobalSettings.set_autoStopAction('autoStopAction')
    vm.set_vmGlobalSettings(vmGlobalSettings)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm.id])
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    vm = vms[0]
    self.assertEqual(vm.get_id(), 'VM1-id', "VM id is not same")
    vmGlobalSets = vm.get_vmGlobalSettings()
    self.assertTrue(vmGlobalSets is not None)
    self.assertEqual(vmGlobalSets.get_id(), 'VM1-id', "VM id is not same")
    self.assertEqual(vmGlobalSets.get_autoStartAction(), 'autoStartAction',
                     "autoStartAction is not same")
    self.assertEqual(vmGlobalSets.get_autoStopAction(), 'autoStopAction',
                     "autoStopAction is not same")
def test_process_incomplete_vms(self):
    """Incomplete VMs should get disks collected and leave the retry list."""
    vm_uuid = '25f04dd3-e924-02b2-9eac-876e3c943262'
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(api, 'vm_delete_by_ids')
    api.vm_delete_by_ids(mox.IgnoreArg(),
                         mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    self.mock.ReplayAll()
    libvirt_inventorymonitor.incomplete_vms = \
        {self.libvirtVM.compute_id: {vm_uuid: 1}}
    self.libvirtVM.process_incomplete_vms()
    vm = InventoryCacheManager.get_object_from_cache(vm_uuid, Constants.Vm)
    self.assert_(vm.get_vmDisks(), "VM disks inventory not collected")
    self.assert_(
        vm_uuid not in
        libvirt_inventorymonitor.incomplete_vms[self.libvirtVM.compute_id],
        "VM id not removed from incomplete list")
    self.mock.stubs.UnsetAll()
def test_vm_save(self):
    """Insert a VM with a SCSI controller and verify persisted values.

    Fixes a defect in the original test: ``set_id`` was called twice
    (the second call overwrote the controller id with 'some_type')
    instead of ``set_type``, so the type assertion compared None to
    None and could never fail.
    """
    vm = Vm()
    vm.id = 'VM1-id'
    vm.name = 'VM1-Name'
    vmScsiController = VmScsiController()
    vmScsiController.set_id('VM_CTRL_1')
    # BUG FIX: was set_id('some_type'), clobbering the controller id.
    vmScsiController.set_type('some_type')
    vm.add_vmScsiControllers(vmScsiController)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm.id])
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    self.assertEqual(vms[0].get_id(), 'VM1-id', "VM id is not same")
    self.assertEqual(vms[0].get_name(), 'VM1-Name', "VM name is not same")
    self.assert_(len(vms[0].get_vmScsiControllers()) == 1,
                 "vmScsiController len mismatch")
    self.assert_(vms[0].get_vmScsiControllers()[0].get_id() ==
                 vmScsiController.get_id(),
                 "vmScsiController id mismatch")
    self.assert_(vms[0].get_vmScsiControllers()[0].get_type() ==
                 vmScsiController.get_type(),
                 "vmScsiController type mismatch")
def test_processVm_disk_exception_next_retry(self):
    """On a disk-lookup exception the VM should be saved without disks."""
    vm_uuid = '25f04dd3-e924-02b2-9eac-876e3c943262'
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(novadb, 'instance_get_by_uuid')
    novadb.instance_get_by_uuid(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'display_name': 'test_vm'})
    self.mock.StubOutWithMock(self.libvirtVM.libvirtconn,
                              'storageVolLookupByPath')
    # Simulate the storage volume lookup blowing up mid-collection.
    self.libvirtVM.libvirtconn.storageVolLookupByPath(
        mox.IgnoreArg()).AndRaise(Exception)
    self.mock.ReplayAll()
    InventoryCacheManager.delete_object_in_cache(vm_uuid, Constants.Vm)
    libvirt_inventorymonitor.incomplete_vms = {
        self.libvirtVM.compute_id: {}}
    self.assertEquals(self.libvirtVM._processVm(libvirt.virDomain()), None)
    vm = InventoryCacheManager.get_object_from_cache(vm_uuid, Constants.Vm)
    self.assertEqual(len(vm.get_vmDisks()), 0,
                     "Instance disk should not exist when there \
is an exception")
    self.mock.VerifyAll()
    self.mock.stubs.UnsetAll()
def test_vm_suspended_event(self):
    """An active VM reported paused should emit VM_SUSPENDED at WARN."""
    domainObj, cachedVm = self._mapLibvirtvmToVm()
    cachedVm.powerState = Constants.VM_POWER_STATE_ACTIVE
    self.mox.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager, 'get_object_from_cache')
    InventoryCacheManager.get_object_from_cache(
        domainObj.UUIDString(), Constants.Vm).AndReturn(cachedVm)
    self.mox.StubOutWithMock(domainObj, 'state')
    domainObj.state(0).AndReturn([3])  # libvirt: 3 == paused
    self.mox.ReplayAll()
    self.libvirtVM._processVm(domainObj)
    self.assertTrue(len(test_notifier.NOTIFICATIONS) > 0)
    msg = self._getEventMsgForEventType(
        event_metadata.EVENT_TYPE_VM_SUSPENDED,
        test_notifier.NOTIFICATIONS)
    self.assertTrue(msg is not None)
    self.assertEquals(msg['priority'], notifier_api.WARN)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_VM_SUSPENDED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'Vm')
    self.assertEquals(payload['entity_id'], domainObj.UUIDString())
    self.assertEquals(payload['state'], Constants.VM_POWER_STATE_PAUSED)
def test_vm_netadpater_save(self):
    """Persist a VM with a network adapter, verify it, then delete the VM."""
    vm = Vm()
    vm.id = 'VM1'
    vmNetAdapter = VmNetAdapter()
    vmNetAdapter.set_id('netAdapter-01')
    vmNetAdapter.set_name('netAdapter-01')
    vmNetAdapter.set_addressType('assigned')
    vmNetAdapter.set_adapterType('E1000')
    vmNetAdapter.set_switchType('vSwitch')
    vmNetAdapter.set_macAddress('00:50:56:81:1c:d0')
    vmNetAdapter.add_ipAddresses('1.1.1.1')
    vmNetAdapter.set_networkName('br100')
    vmNetAdapter.set_vlanId(0)
    vm.add_vmNetAdapters(vmNetAdapter)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    virtual_machines = healthnmon_db_api.vm_get_by_ids(
        get_admin_context(), ['VM1'])
    vm_from_db = virtual_machines[0]
    netAdapter = vm_from_db.get_vmNetAdapters()[0]
    self.assertTrue(vmNetAdapter.get_id() == netAdapter.get_id())
    # Clean up and confirm the VM is really gone.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm.id])
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm.id])
    self.assertTrue(vms is None or len(vms) == 0, 'VM not deleted')
def test_vm_host_get_by_id(self):
    """Save a host with a VM and a mounted storage volume, fetch by id."""
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vm = Vm()
    vm.id = 'VM11'
    vm.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId(host_id)
    mntPnt.set_path('/path')
    volume = StorageVolume()
    volume.set_id('SV11')
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)
    vmhosts = healthnmon_db_api.vm_host_get_by_ids(
        get_admin_context(), [host_id])
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertTrue(vmhosts[0].id == host_id)
def test_vm_host_get_all_for_vm(self):
    """Host listing should track VM membership before and after deletion."""
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vm = Vm()
    vm.id = 'VM11'
    vm.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertTrue(vmhosts[0].id == host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assert_(vmids is not None)
    self.assert_(len(vmids) == 1)
    self.assert_(vm.id in vmids)
    # After deleting the VM the host must report no VM ids.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm.id])
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertTrue(vmhosts[0].id == host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assert_((vmids is None) or (len(vmids) == 0))
def test_processVmForIPAddress(self):
    """processUpdates should populate the cached VM's IP address profile."""
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVM._processVm(libvirt.virDomain()), None)
    self.libvirtVM.processUpdates()
    vm = InventoryCacheManager.get_object_from_cache(
        "25f04dd3-e924-02b2-9eac-876e3c943262", Constants.Vm)
    ipProfileList = vm.get_ipAddresses()
    self.assertTrue(ipProfileList is not None)
    self.assertTrue(ipProfileList[0].get_ipAddress() == '10.1.1.19')
    self.assertTrue(ipProfileList[1].get_ipAddress() == '10.2.1.20')
def test_vm_reconfigured_event(self):
    """A VM whose name changed should emit a VM_RECONFIGURED notification."""
    domainObj, cachedVm = self._mapLibvirtvmToVm()
    cachedVm.name = 'OldName'  # differs from the domain's current name
    self.mox.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager, 'get_object_from_cache')
    InventoryCacheManager.get_object_from_cache(
        domainObj.UUIDString(), Constants.Vm).AndReturn(cachedVm)
    self.mox.ReplayAll()
    self.libvirtVM._processVm(domainObj)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_VM_RECONFIGURED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'Vm')
    self.assertEquals(payload['entity_id'], domainObj.UUIDString())
def test_processVmForIPAddress(self):
    """processUpdates should populate the cached VM's IP address profile."""
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVM._processVm(libvirt.virDomain()), None)
    self.libvirtVM.processUpdates()
    vm = InventoryCacheManager.get_object_from_cache(
        "25f04dd3-e924-02b2-9eac-876e3c943262", Constants.Vm)
    ipProfileList = vm.get_ipAddresses()
    self.assertTrue(ipProfileList is not None)
    self.assertTrue(ipProfileList[0].get_ipAddress() == '10.1.1.19')
    self.assertTrue(ipProfileList[1].get_ipAddress() == '10.2.1.20')
def test_vm_host_get_by_id(self):
    """Save a host with a VM and a mounted storage volume, fetch by id."""
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vm = Vm()
    vm.id = 'VM11'
    vm.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId(host_id)
    mntPnt.set_path('/path')
    volume = StorageVolume()
    volume.set_id('SV11')
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)
    vmhosts = healthnmon_db_api.vm_host_get_by_ids(
        get_admin_context(), [host_id])
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertTrue(vmhosts[0].id == host_id)
def __create_vm(self, **kwargs):
    """Build a Vm with the given attributes, persist it and return it."""
    vm = Vm()
    if kwargs is not None:
        for field, value in kwargs.items():
            setattr(vm, field, value)
    healthnmon_db_api.vm_save(self.admin_context, vm)
    return vm
def test_vm_host_get_all_for_vm(self):
    """Host listing should track VM membership before and after deletion."""
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vm = Vm()
    vm.id = 'VM11'
    vm.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertTrue(vmhosts[0].id == host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assert_(vmids is not None)
    self.assert_(len(vmids) == 1)
    self.assert_(vm.id in vmids)
    # After deleting the VM the host must report no VM ids.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm.id])
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertTrue(vmhosts[0].id == host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assert_((vmids is None) or (len(vmids) == 0))
def test_vm_get_all_by_filters_changessince(self): # Create Vm vm_ids = ('VM1', 'VM2', 'VM3') vm_names = ('name1', 'name2', 'name3') for i in range(len(vm_ids)): self.__create_vm(id=vm_ids[i], name=vm_names[i]) created_time = long(time.time() * 1000L) # Wait for 1 sec and update second vm and delete third vm time.sleep(1) second_vm = healthnmon_db_api.vm_get_by_ids( self.admin_context, [vm_ids[1]])[0] second_vm.name = 'New name' healthnmon_db_api.vm_save(self.admin_context, second_vm) healthnmon_db_api.vm_delete_by_ids(self.admin_context, [vm_ids[2]]) # Query with filter expected_updated_ids = [vm_ids[1], vm_ids[2]] filters = {'changes-since': created_time} vms = healthnmon_db_api.vm_get_all_by_filters( self.admin_context, filters, None, None) self.assert_(vms is not None) self.assert_(len(vms) == 2) for vm in vms: self.assert_(vm is not None) self.assert_(vm.id in expected_updated_ids)
def test_vm_stopped_event(self):
    """An active VM reported shut off should emit VM_STOPPED at WARN."""
    domainObj, cachedVm = self._mapLibvirtvmToVm()
    cachedVm.powerState = Constants.VM_POWER_STATE_ACTIVE
    self.mox.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager, 'get_object_from_cache')
    InventoryCacheManager.get_object_from_cache(
        domainObj.UUIDString(), Constants.Vm).AndReturn(cachedVm)
    self.mox.StubOutWithMock(domainObj, 'state')
    domainObj.state(0).AndReturn([5])  # libvirt: 5 == shut off
    self.mox.ReplayAll()
    self.libvirtVM._processVm(domainObj)
    self.assertTrue(len(test_notifier.NOTIFICATIONS) > 0)
    msg = self._getEventMsgForEventType(
        event_metadata.EVENT_TYPE_VM_STOPPED,
        test_notifier.NOTIFICATIONS)
    self.assertTrue(msg is not None)
    self.assertEquals(msg['priority'], notifier_api.WARN)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_VM_STOPPED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'Vm')
    self.assertEquals(payload['entity_id'], domainObj.UUIDString())
    self.assertEquals(payload['state'], Constants.VM_POWER_STATE_STOPPED)
def test_vm_created_event(self):
    """A domain with no cached counterpart should emit VM_CREATED."""
    domainObj = libvirt.virDomain()
    self.mox.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager, 'get_object_from_cache')
    # Cache miss -> the VM is treated as newly created.
    InventoryCacheManager.get_object_from_cache(
        domainObj.UUIDString(), Constants.Vm).AndReturn(None)
    self.mox.ReplayAll()
    self.libvirtVM._processVm(domainObj)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_VM_CREATED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'Vm')
    self.assertEquals(payload['entity_id'], domainObj.UUIDString())
def test_process_updates_for_updated_VM(self):
    """Updated domain attributes should be reflected in the cached VM."""
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(api, 'vm_delete_by_ids')
    api.vm_delete_by_ids(mox.IgnoreArg(),
                         mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    self.mock.ReplayAll()
    domainObj = libvirt.virDomain()
    self.assertEquals(
        self.libvirtVM.process_updates_for_updated_VM(domainObj), None)
    vm = InventoryCacheManager.get_object_from_cache(
        "25f04dd3-e924-02b2-9eac-876e3c943262", Constants.Vm)
    # self.assertEquals("TestVirtMgrVM7", vm.get_name())
    self.assertEquals("1048576", str(vm.get_memorySize()))
    self.assertEquals("hd", str(vm.get_bootOrder()).strip())
    self.mock.stubs.UnsetAll()
def test_vm_save(self):
    """Insert a VM with a SCSI controller and verify persisted values.

    Fixes a defect in the original test: ``set_id`` was called twice
    (the second call overwrote the controller id with 'some_type')
    instead of ``set_type``, so the type assertion compared None to
    None and could never fail.
    """
    vm = Vm()
    vm.id = 'VM1-id'
    vm.name = 'VM1-Name'
    vmScsiController = VmScsiController()
    vmScsiController.set_id('VM_CTRL_1')
    # BUG FIX: was set_id('some_type'), clobbering the controller id.
    vmScsiController.set_type('some_type')
    vm.add_vmScsiControllers(vmScsiController)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm.id])
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    self.assertEqual(vms[0].get_id(), 'VM1-id', "VM id is not same")
    self.assertEqual(vms[0].get_name(), 'VM1-Name', "VM name is not same")
    self.assert_(len(vms[0].get_vmScsiControllers()) == 1,
                 "vmScsiController len mismatch")
    self.assert_(vms[0].get_vmScsiControllers()[0].get_id() ==
                 vmScsiController.get_id(),
                 "vmScsiController id mismatch")
    self.assert_(vms[0].get_vmScsiControllers()[0].get_type() ==
                 vmScsiController.get_type(),
                 "vmScsiController type mismatch")
def test_ProcessUpdatesException(self):
    """processUpdates should swallow exceptions raised during VM deletes."""
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(api, 'vm_delete_by_ids')
    api.vm_delete_by_ids(mox.IgnoreArg(),
                         mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    mock_libvirtVm = LibvirtVM(self.connection, '1')
    self.mock.StubOutWithMock(mock_libvirtVm, 'processVmDeletes')
    mock_libvirtVm.processVmDeletes([], []).AndRaise(Exception)
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVM.processUpdates(), None)
    self.assertRaises(Exception, LibvirtVM)
    self.mock.stubs.UnsetAll()
def test_ProcessUpdatesException(self):
    """processUpdates should swallow exceptions raised during VM deletes."""
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(api, 'vm_delete_by_ids')
    api.vm_delete_by_ids(mox.IgnoreArg(),
                         mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    mock_libvirtVm = LibvirtVM(self.connection, '1')
    self.mock.StubOutWithMock(mock_libvirtVm, 'processVmDeletes')
    mock_libvirtVm.processVmDeletes([], []).AndRaise(Exception)
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVM.processUpdates(), None)
    self.assertRaises(Exception, LibvirtVM)
    self.mock.stubs.UnsetAll()
def test_vm_get_by_id(self):
    """A saved VM should be retrievable by its id."""
    vm_id = 'VM1'
    vm = Vm()
    vm.id = vm_id
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm_id])
    self.assertFalse(vms is None, 'VM get by id returned a none list')
    self.assertTrue(len(vms) > 0,
                    'VM get by id returned invalid number of list')
    self.assertTrue(vms[0].id == 'VM1')
def test_vm_get_by_id(self):
    """A saved VM should be retrievable by its id."""
    vm_id = 'VM1'
    vm = Vm()
    vm.id = vm_id
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm_id])
    self.assertFalse(vms is None, 'VM get by id returned a none list')
    self.assertTrue(len(vms) > 0,
                    'VM get by id returned invalid number of list')
    self.assertTrue(vms[0].id == 'VM1')
def test_vm_delete_none(self):
    """Deleting with a None id list must leave existing VMs untouched."""
    vm = Vm()
    vm.id = 'VM1-id'
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    # Deleting None should be a no-op, not an error.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), None)
    vms = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
def test_vm_save_none(self):
    """Saving None must be a no-op and leave the VM count unchanged."""
    vm = Vm()
    vm.id = 'VM1-id'
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    # Saving None should neither raise nor add a row.
    healthnmon_db_api.vm_save(get_admin_context(), None)
    vmsaved = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vmsaved is not None)
    self.assertTrue(len(vmsaved) == 1)
def test_vm_delete_none(self):
    """Deleting with a None id list must leave existing VMs untouched."""
    vm = Vm()
    vm.id = 'VM1-id'
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    # Deleting None should be a no-op, not an error.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), None)
    vms = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
def test_vm_save_none(self):
    """Saving None must be a no-op and leave the VM count unchanged."""
    vm = Vm()
    vm.id = 'VM1-id'
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    # Saving None should neither raise nor add a row.
    healthnmon_db_api.vm_save(get_admin_context(), None)
    vmsaved = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertTrue(vmsaved is not None)
    self.assertTrue(len(vmsaved) == 1)
def test_vm_delete(self):
    """A saved VM (with global settings) should be removable by id."""
    vm_id = 'VM1'
    vm = Vm()
    vm.id = vm_id
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id(vm_id)
    vm.set_vmGlobalSettings(vmGlobalSettings)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm_id])
    self.assertFalse(vms is None, 'VM get by id returned a none list')
    self.assertTrue(len(vms) > 0,
                    'VM get by id returned invalid number of list')
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm_id])
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm_id])
    self.assertTrue(vms is None or len(vms) == 0, 'VM not deleted')
def test_processVm(self):
    """_processVm should populate the cached VM from the libvirt domain."""
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVM._processVm(libvirt.virDomain()), None)
    vm = InventoryCacheManager.get_object_from_cache(
        "25f04dd3-e924-02b2-9eac-876e3c943262", Constants.Vm)
    #self.assertEquals('Disconnected', vm.get_connectionState())
    # self.assertEquals('TestVirtMgrVM7', str(vm.get_name()))
    self.assertEquals("1048576", str(vm.get_memorySize()))
    #self.assertEquals("hd", str(vm.get_bootOrder()).strip())
    self.mock.stubs.UnsetAll()
def test_vm_delete(self):
    """A saved VM (with global settings) should be removable by id."""
    vm_id = 'VM1'
    vm = Vm()
    vm.id = vm_id
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id(vm_id)
    vm.set_vmGlobalSettings(vmGlobalSettings)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm_id])
    self.assertFalse(vms is None, 'VM get by id returned a none list')
    self.assertTrue(len(vms) > 0,
                    'VM get by id returned invalid number of list')
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm_id])
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), [vm_id])
    self.assertTrue(vms is None or len(vms) == 0, 'VM not deleted')
def test_storagevolume_delete(self):
    """A storage volume referenced by VM disks should still be deletable."""
    storagevolume_id = 'SV1'
    storagevolume = StorageVolume()
    storagevolume.id = storagevolume_id
    # Build a VM with two disks, one of which references the volume.
    vm = Vm()
    vm.set_id('vm-01')
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id('vm_01')
    vm.set_vmGlobalSettings(vmGlobalSettings)
    for disk_id, vol_id in (('disk-01', storagevolume_id),
                            ('disk-02', 'SV2')):
        vmDisk = VmDisk()
        vmDisk.set_id(disk_id)
        vmDisk.set_storageVolumeId(vol_id)
        vm.add_vmDisks(vmDisk)
    healthnmon_db_api.vm_save(self.admin_context, vm)
    healthnmon_db_api.storage_volume_save(self.admin_context, storagevolume)
    storagevolumes = healthnmon_db_api.storage_volume_get_by_ids(
        self.admin_context, [storagevolume_id])
    self.assertFalse(storagevolumes is None,
                     'storage volume get by id returned a none list')
    self.assertTrue(
        len(storagevolumes) > 0,
        'storage volume get by id returned invalid number of list')
    healthnmon_db_api.storage_volume_delete_by_ids(self.admin_context,
                                                   [storagevolume_id])
    storagevolumes = healthnmon_db_api.storage_volume_get_by_ids(
        self.admin_context, [storagevolume_id])
    self.assertTrue(storagevolumes is None or len(storagevolumes) == 0,
                    'Storage volume not deleted')
def test_process_incomplete_vms_with_retry(self):
    """A failing disk lookup should bump the incomplete-VM retry count."""
    vm_uuid = '25f04dd3-e924-02b2-9eac-876e3c943262'
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(api, 'vm_delete_by_ids')
    api.vm_delete_by_ids(mox.IgnoreArg(),
                         mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    self.mock.StubOutWithMock(self.libvirtVM.libvirtconn,
                              'storageVolLookupByPath')
    self.libvirtVM.libvirtconn.storageVolLookupByPath(
        mox.IgnoreArg()).AndRaise(Exception)
    self.mock.ReplayAll()
    libvirt_inventorymonitor.incomplete_vms = \
        {self.libvirtVM.compute_id: {vm_uuid: 1}}
    self.libvirtVM.process_incomplete_vms()
    vm = InventoryCacheManager.get_object_from_cache(vm_uuid, Constants.Vm)
    self.assert_(not (vm.get_vmDisks()),
                 "VM disks should not be collected")
    self.assert_(
        libvirt_inventorymonitor.incomplete_vms[
            self.libvirtVM.compute_id][vm_uuid] == 2,
        "incomplete_vms retry count not incremented")
    self.mock.stubs.UnsetAll()
def test_process_incomplete_vms_deletedvm(self):
    """A VM gone from libvirt should be dropped from the incomplete list."""
    vm_uuid = '25f04dd3-e924-02b2-9eac-876e3c943262'
    self.mock.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(api, 'vm_delete_by_ids')
    api.vm_delete_by_ids(mox.IgnoreArg(),
                         mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(nova_db, 'service_get_all_by_topic')
    nova_db.service_get_all_by_topic(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(InventoryCacheManager,
                              'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVM.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    # Hypervisor reports no defined and no running domains.
    self.mock.StubOutWithMock(self.libvirtVM.libvirtconn,
                              'listDefinedDomains')
    self.libvirtVM.libvirtconn.listDefinedDomains().AndReturn([])
    self.mock.StubOutWithMock(self.libvirtVM.libvirtconn, 'listDomainsID')
    self.libvirtVM.libvirtconn.listDomainsID().AndReturn([])
    self.mock.ReplayAll()
    libvirt_inventorymonitor.incomplete_vms = \
        {self.libvirtVM.compute_id: {vm_uuid: 1}}
    self.libvirtVM.process_incomplete_vms()
    self.assert_(
        vm_uuid not in
        libvirt_inventorymonitor.incomplete_vms[self.libvirtVM.compute_id],
        "Deleted VM id not removed from incomplete list")
    self.mock.stubs.UnsetAll()
def test_storagevolume_delete(self):
    """Verify storage_volume_delete_by_ids removes a volume even when a
    VM has disks referring to it."""
    vol_id = 'SV1'
    vol = StorageVolume()
    vol.id = vol_id
    machine = Vm()
    machine.set_id('vm-01')
    settings = VmGlobalSettings()
    settings.set_id('vm_01')
    machine.set_vmGlobalSettings(settings)
    # Two disks: one backed by the volume under test, one by another.
    for disk_id, backing_vol in (('disk-01', vol_id), ('disk-02', 'SV2')):
        disk = VmDisk()
        disk.set_id(disk_id)
        disk.set_storageVolumeId(backing_vol)
        machine.add_vmDisks(disk)
    healthnmon_db_api.vm_save(self.admin_context, machine)
    healthnmon_db_api.storage_volume_save(self.admin_context, vol)
    fetched = healthnmon_db_api.storage_volume_get_by_ids(
        self.admin_context, [vol_id])
    self.assertFalse(fetched is None,
                     'storage volume get by id returned a none list')
    self.assertTrue(
        len(fetched) > 0,
        'storage volume get by id returned invalid number of list')
    healthnmon_db_api.storage_volume_delete_by_ids(
        self.admin_context, [vol_id])
    fetched = healthnmon_db_api.storage_volume_get_by_ids(
        self.admin_context, [vol_id])
    self.assertTrue(fetched is None or len(fetched) == 0,
                    'Storage volume not deleted')
def test_vm_get_all(self):
    """Persist two VMs, then check vm_get_all returns exactly those two
    with matching ids and names."""
    seed = (('VM1-id', 'VM1-Name'), ('VM2-id', 'VM2-Name'))
    for vm_id, vm_name in seed:
        vm = Vm()
        vm.id = vm_id
        vm.name = vm_name
        healthnmon_db_api.vm_save(get_admin_context(), vm)
    vms = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertFalse(vms is None, 'vm_get_all returned None')
    self.assertTrue(len(vms) == 2,
                    'vm_get_all does not returned expected number of vms')
    # Ids first, then names, matching insertion order.
    for pos, (vm_id, _unused) in enumerate(seed):
        self.assertEqual(vms[pos].get_id(), vm_id, "VM id is not same")
    for pos, (_unused, vm_name) in enumerate(seed):
        self.assertEqual(vms[pos].get_name(), vm_name,
                         "VM Name is not same")
def test_vm_get_all(self):
    """Store two VMs and verify vm_get_all returns both, in insertion
    order, with their ids and names intact."""
    first = Vm()
    first.id = 'VM1-id'
    first.name = 'VM1-Name'
    healthnmon_db_api.vm_save(get_admin_context(), first)
    second = Vm()
    second.id = 'VM2-id'
    second.name = 'VM2-Name'
    healthnmon_db_api.vm_save(get_admin_context(), second)
    fetched = healthnmon_db_api.vm_get_all(get_admin_context())
    self.assertFalse(fetched is None, 'vm_get_all returned None')
    self.assertTrue(
        len(fetched) == 2,
        'vm_get_all does not returned expected number of vms')
    self.assertEqual(fetched[0].get_id(), 'VM1-id', "VM id is not same")
    self.assertEqual(fetched[1].get_id(), 'VM2-id', "VM id is not same")
    self.assertEqual(fetched[0].get_name(), 'VM1-Name',
                     "VM Name is not same")
    self.assertEqual(fetched[1].get_name(), 'VM2-Name',
                     "VM Name is not same")
def test_vm_get_all_by_filters_changessince(self): # Create Vm vm_ids = ('VM1', 'VM2', 'VM3') vm_names = ('name1', 'name2', 'name3') for i in range(len(vm_ids)): self.__create_vm(id=vm_ids[i], name=vm_names[i]) created_time = long(time.time() * 1000L) # Wait for 1 sec and update second vm and delete third vm time.sleep(1) second_vm = healthnmon_db_api.vm_get_by_ids(self.admin_context, [vm_ids[1]])[0] second_vm.name = 'New name' healthnmon_db_api.vm_save(self.admin_context, second_vm) healthnmon_db_api.vm_delete_by_ids(self.admin_context, [vm_ids[2]]) # Query with filter expected_updated_ids = [vm_ids[1], vm_ids[2]] filters = {'changes-since': created_time} vms = healthnmon_db_api.vm_get_all_by_filters(self.admin_context, filters, None, None) self.assert_(vms is not None) self.assert_(len(vms) == 2) for vm in vms: self.assert_(vm is not None) self.assert_(vm.id in expected_updated_ids)
def test_timestamp_columns(self):
    """
    Test the time stamp columns createEpoch, modifiedEpoch and
    deletedEpoch of a Vm and its contained VmGlobalSettings:
    createEpoch is stamped on first save, lastModifiedEpoch is
    refreshed on each subsequent save, and createEpoch is preserved
    across updates.
    """
    vm = Vm()
    vm.set_id('VM1')
    # Check for createEpoch: first save stamps a creation epoch that
    # must fall between the two sampled times.
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_save(self.admin_context, vm)
    epoch_after = utils.get_current_epoch_ms()
    vm_queried = healthnmon_db_api.vm_get_by_ids(self.admin_context,
                                                 [vm.get_id()])[0]
    self.assert_(
        test_utils.is_timestamp_between(epoch_before, epoch_after,
                                        vm_queried.get_createEpoch()))
    # Check for lastModifiedEpoch and createEpoch after adding
    # VmGlobalSettings.
    vm_modified = vm_queried
    # Clear the timestamp fields so the save path must repopulate them.
    test_utils.unset_timestamp_fields(vm_modified)
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id('VMGS1')
    vmGlobalSettings.set_autoStartAction(Constants.AUTO_START_ENABLED)
    vm_modified.set_vmGlobalSettings(vmGlobalSettings)
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_save(self.admin_context, vm_modified)
    epoch_after = utils.get_current_epoch_ms()
    vm_queried = healthnmon_db_api.vm_get_by_ids(self.admin_context,
                                                 [vm.get_id()])[0]
    # createEpoch must survive the update unchanged.
    self.assert_(
        vm_modified.get_createEpoch() == vm_queried.get_createEpoch())
    self.assert_(
        test_utils.is_timestamp_between(
            epoch_before, epoch_after,
            vm_queried.get_lastModifiedEpoch()))
    self.assert_(
        test_utils.is_timestamp_between(
            epoch_before, epoch_after,
            vm_queried.get_vmGlobalSettings().get_createEpoch()))
    # Check for lastModifiedEpoch after modifying vm.
    vm_modified = vm_queried
    test_utils.unset_timestamp_fields(vm_modified)
    vm_modified.set_name('changed_name')
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_save(self.admin_context, vm_modified)
    epoch_after = utils.get_current_epoch_ms()
    vm_queried = healthnmon_db_api.vm_get_by_ids(self.admin_context,
                                                 [vm.get_id()])[0]
    self.assert_(
        test_utils.is_timestamp_between(
            epoch_before, epoch_after,
            vm_queried.get_lastModifiedEpoch()))
    self.assert_(
        test_utils.is_timestamp_between(
            epoch_before, epoch_after,
            vm_queried.get_vmGlobalSettings().get_lastModifiedEpoch()))
    self.assert_(
        vm_modified.get_createEpoch() == vm_queried.get_createEpoch())
    self.assert_(vm_modified.get_vmGlobalSettings().get_createEpoch() ==
                 vm_queried.get_vmGlobalSettings().get_createEpoch())
def test_vm_created_event(self):
    """A domain missing from the inventory cache must raise exactly one
    VM_CREATED notification carrying the domain's UUID as entity_id."""
    domainObj = libvirt.virDomain()
    self.mox.StubOutWithMock(api, 'vm_save')
    api.vm_save(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager,
                             'get_object_from_cache')
    # Cache miss (None) marks the domain as newly created.
    InventoryCacheManager.get_object_from_cache(
        domainObj.UUIDString(), Constants.Vm).AndReturn(None)
    self.mox.ReplayAll()
    self.libvirtVM._processVm(domainObj)
    # Exactly one INFO notification of type VM_CREATED is expected.
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = \
        event_metadata.get_EventMetaData(
            event_metadata.EVENT_TYPE_VM_CREATED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'Vm')
    self.assertEquals(payload['entity_id'], domainObj.UUIDString())
def test_timestamp_columns(self):
    """
    Test the time stamp columns createEpoch, modifiedEpoch and
    deletedEpoch on Vm / VmGlobalSettings across an initial save,
    a structural update and a field update.
    """
    vm = Vm()
    vm.set_id('VM1')
    # Check for createEpoch on the very first save.
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_save(self.admin_context, vm)
    epoch_after = utils.get_current_epoch_ms()
    vm_queried = healthnmon_db_api.vm_get_by_ids(
        self.admin_context, [vm.get_id()])[0]
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vm_queried.get_createEpoch()))
    # Check for lastModifiedEpoch and createEpoch
    # after adding VmGlobalSettings.
    vm_modified = vm_queried
    # Clear timestamps so the save path must set them again.
    test_utils.unset_timestamp_fields(vm_modified)
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id('VMGS1')
    vmGlobalSettings.set_autoStartAction(Constants.AUTO_START_ENABLED)
    vm_modified.set_vmGlobalSettings(vmGlobalSettings)
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_save(self.admin_context, vm_modified)
    epoch_after = utils.get_current_epoch_ms()
    vm_queried = healthnmon_db_api.vm_get_by_ids(
        self.admin_context, [vm.get_id()])[0]
    # createEpoch of the Vm itself must not change on update.
    self.assert_(
        vm_modified.get_createEpoch() == vm_queried.get_createEpoch())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vm_queried.get_lastModifiedEpoch()))
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after,
        vm_queried.get_vmGlobalSettings().get_createEpoch()))
    # Check for lastModifiedEpoch after modifying vm.
    vm_modified = vm_queried
    test_utils.unset_timestamp_fields(vm_modified)
    vm_modified.set_name('changed_name')
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_save(self.admin_context, vm_modified)
    epoch_after = utils.get_current_epoch_ms()
    vm_queried = healthnmon_db_api.vm_get_by_ids(
        self.admin_context, [vm.get_id()])[0]
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vm_queried.get_lastModifiedEpoch()))
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after,
        vm_queried.get_vmGlobalSettings().get_lastModifiedEpoch()))
    self.assert_(
        vm_modified.get_createEpoch() == vm_queried.get_createEpoch())
    self.assert_(vm_modified.get_vmGlobalSettings().get_createEpoch() ==
                 vm_queried.get_vmGlobalSettings().get_createEpoch())
def __save(self, vmhost, *vms):
    """Persist the given host, then each VM passed as varargs."""
    context = self.admin_context
    healthnmon_db_api.vm_host_save(context, vmhost)
    # Iterating an empty varargs tuple is a no-op, so no guard needed.
    for vm in vms:
        healthnmon_db_api.vm_save(context, vm)
def test_vm_host_get_all(self):
    """
    Inserts more than one host with vms and storage volumes.
    Also validates the data retrieved from the vmhost, vm,
    storage volumes.
    """
    # Two hosts; only VH1 gets a VM and a storage-volume mount point.
    vmhost = VmHost()
    vmhost.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vmhost = VmHost()
    vmhost.id = 'VH2-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vm = Vm()
    vm.id = 'VM1-id'
    vm.set_vmHostId('VH1-id')
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id(vm.id)
    vmGlobalSettings.set_autoStartAction('autoStartAction')
    vmGlobalSettings.set_autoStopAction('autoStopAction')
    vm.set_vmGlobalSettings(vmGlobalSettings)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    # Storage volume mounted on VH1 via a HostMountPoint.
    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId('VH1-id')
    mntPnt.set_path('/path')
    volume = StorageVolume()
    sv_id = 'SV1-id'
    volume.set_id(sv_id)
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(vmhosts) == 2,
        'vm_host_get_all does not returned expected number of hosts')
    self.assertEqual(vmhosts[0].get_id(), 'VH1-id',
                     "VMHost id is not same")
    self.assertEqual(vmhosts[1].get_id(), 'VH2-id',
                     "VMHost id is not same")
    # VH1 must list exactly the one VM placed on it.
    vmlist = vmhosts[0].get_virtualMachineIds()
    self.assertFalse(vmlist is None,
                     "virtual machines from the host returned None")
    self.assertTrue(
        len(vmlist) == 1,
        "length of virtual machines list is not returned as expected")
    self.assertTrue(vm.id in vmlist, "VmId is not in host")
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), ['VM1-id'])
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    vm = vms[0]
    self.assertEqual(vm.get_id(), 'VM1-id', "VM id is not same")
    # Nested VmGlobalSettings must round-trip through the DB.
    vmGlobalSets = vm.get_vmGlobalSettings()
    self.assertTrue(vmGlobalSets is not None)
    self.assertEqual(vmGlobalSets.get_id(), 'VM1-id', "VM id is not same")
    self.assertEqual(vmGlobalSets.get_autoStartAction(),
                     'autoStartAction', "autoStartAction is not same")
    self.assertEqual(vmGlobalSets.get_autoStopAction(),
                     'autoStopAction', "autoStopAction is not same")
    # VH1 must list the storage volume mounted on it.
    svlist = vmhosts[0].get_storageVolumeIds()
    self.assertFalse(svlist is None,
                     "Storage Volumes from the host returned None")
    self.assertTrue(
        len(svlist) >= 1,
        "length of storage volumes list is not returned as expected")
    self.assertTrue(sv_id in svlist, "Storage Volume Id is not host")
    storagevolumes = \
        healthnmon_db_api.storage_volume_get_by_ids(get_admin_context(),
                                                    ['SV1-id'])
    self.assertFalse(storagevolumes is None,
                     'Storage volume get by id returned a none list')
    self.assertTrue(
        len(storagevolumes) > 0,
        'Storage volume get by id returned invalid number of list')
    self.assertEquals(storagevolumes[0].id, 'SV1-id',
                      "Storage volume id is not same")
    hostMountPoints = storagevolumes[0].get_mountPoints()
    self.assertEquals(hostMountPoints[0].get_path(), '/path',
                      "Host mount point path is not same")
    self.assertEquals(hostMountPoints[0].get_vmHostId(), 'VH1-id',
                      "VmHost id is not same for storage volumes")
def test_vm_host_delete(self):
    """Build host VH1 with a virtual switch, port groups, a VM and a
    storage volume mounted on both VH1 and VH2, then verify that
    vm_host_delete_by_ids removes VH1."""
    vmhost_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = vmhost_id
    # Virtual switch with one port group attached to the host.
    vSwitch = VirtualSwitch()
    vSwitch.set_id('vSwitch-01')
    vSwitch.set_name('vSwitch-01')
    vSwitch.set_resourceManagerId('rmId')
    vSwitch.set_switchType('vSwitch')
    cost1 = Cost()
    cost1.set_value(100)
    cost1.set_units('USD')
    vSwitch.set_cost(cost1)
    portGroup = PortGroup()
    portGroup.set_id('pg-01')
    portGroup.set_name('pg-01')
    portGroup.set_resourceManagerId('rmId')
    portGroup.set_type('portgroup_type')
    portGroup.set_cost(cost1)
    vSwitch.add_portGroups(portGroup)
    vmhost.add_virtualSwitches(vSwitch)
    vmhost.add_portGroups(portGroup)
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    # Second host so the storage volume below is mounted on two hosts.
    vmhost2 = VmHost()
    vmhost2.set_id('VH2')
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost2)
    storage = StorageVolume()
    storage.set_id('sv-01')
    storage.set_name('storage-01')
    storage.set_resourceManagerId('rmId')
    storage.set_size(1234)
    storage.set_free(2345)
    storage.set_vmfsVolume(True)
    storage.set_shared(True)
    storage.set_assignedServerCount(1)
    storage.set_volumeType('VMFS')
    storage.set_volumeId('101')
    hostMount1 = HostMountPoint()
    hostMount1.set_path('test_path1')
    hostMount1.set_vmHostId('VH1')
    storage.add_mountPoints(hostMount1)
    hostMount2 = HostMountPoint()
    hostMount2.set_path('test_path2')
    hostMount2.set_vmHostId('VH2')
    storage.add_mountPoints(hostMount2)
    healthnmon_db_api.storage_volume_save(get_admin_context(), storage)
    # VM placed on the host being deleted.
    vm = Vm()
    vm.set_id('vm-01')
    vm.set_name('vm-01')
    vm.set_vmHostId('VH1')
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vmhosts = \
        healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                             [vmhost_id])
    self.assertFalse(vmhosts is None,
                     'host get by id returned a none list')
    self.assertTrue(
        len(vmhosts) > 0,
        'host get by id returned invalid number of list')
    healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(),
                                            [vmhost_id])
    vmhosts = \
        healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                             [vmhost_id])
    self.assertTrue(vmhosts is None or len(vmhosts) == 0,
                    'host not deleted')
def test_vm_host_get_all(self):
    """
    Inserts more than one host with vms and storage volumes, then
    validates the data retrieved for the vmhost, vm and storage
    volumes (ids, nested global settings, mount points).
    """
    # Host VH1 will carry the VM and storage volume; VH2 stays empty.
    vmhost = VmHost()
    vmhost.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vmhost = VmHost()
    vmhost.id = 'VH2-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vm = Vm()
    vm.id = 'VM1-id'
    vm.set_vmHostId('VH1-id')
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id(vm.id)
    vmGlobalSettings.set_autoStartAction('autoStartAction')
    vmGlobalSettings.set_autoStopAction('autoStopAction')
    vm.set_vmGlobalSettings(vmGlobalSettings)
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    # Mount the storage volume on VH1.
    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId('VH1-id')
    mntPnt.set_path('/path')
    volume = StorageVolume()
    sv_id = 'SV1-id'
    volume.set_id(sv_id)
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(vmhosts) == 2,
        'vm_host_get_all does not returned expected number of hosts')
    self.assertEqual(vmhosts[0].get_id(), 'VH1-id',
                     "VMHost id is not same")
    self.assertEqual(vmhosts[1].get_id(), 'VH2-id',
                     "VMHost id is not same")
    # VH1 should reference exactly the one VM created above.
    vmlist = vmhosts[0].get_virtualMachineIds()
    self.assertFalse(vmlist is None,
                     "virtual machines from the host returned None")
    self.assertTrue(
        len(vmlist) == 1,
        "length of virtual machines list is not returned as expected")
    self.assertTrue(vm.id in vmlist, "VmId is not in host")
    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(), ['VM1-id'])
    self.assertTrue(vms is not None)
    self.assertTrue(len(vms) == 1)
    vm = vms[0]
    self.assertEqual(vm.get_id(), 'VM1-id', "VM id is not same")
    # Nested VmGlobalSettings must come back with the saved values.
    vmGlobalSets = vm.get_vmGlobalSettings()
    self.assertTrue(vmGlobalSets is not None)
    self.assertEqual(vmGlobalSets.get_id(), 'VM1-id', "VM id is not same")
    self.assertEqual(vmGlobalSets.get_autoStartAction(),
                     'autoStartAction', "autoStartAction is not same")
    self.assertEqual(vmGlobalSets.get_autoStopAction(),
                     'autoStopAction', "autoStopAction is not same")
    # VH1 should reference the mounted storage volume.
    svlist = vmhosts[0].get_storageVolumeIds()
    self.assertFalse(svlist is None,
                     "Storage Volumes from the host returned None")
    self.assertTrue(
        len(svlist) >= 1,
        "length of storage volumes list is not returned as expected")
    self.assertTrue(sv_id in svlist, "Storage Volume Id is not host")
    storagevolumes = \
        healthnmon_db_api.storage_volume_get_by_ids(get_admin_context(),
                                                    ['SV1-id'])
    self.assertFalse(storagevolumes is None,
                     'Storage volume get by id returned a none list')
    self.assertTrue(
        len(storagevolumes) > 0,
        'Storage volume get by id returned invalid number of list')
    self.assertEquals(storagevolumes[0].id, 'SV1-id',
                      "Storage volume id is not same")
    hostMountPoints = storagevolumes[0].get_mountPoints()
    self.assertEquals(hostMountPoints[0].get_path(), '/path',
                      "Host mount point path is not same")
    self.assertEquals(
        hostMountPoints[0].get_vmHostId(), 'VH1-id',
        "VmHost id is not same for storage volumes")
def test_vm_host_delete(self):
    """Create host VH1 carrying a virtual switch, port groups, a VM and
    a storage volume shared with VH2; deleting VH1 by id must make it
    unavailable via vm_host_get_by_ids."""
    vmhost_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = vmhost_id
    # Attach a virtual switch (with cost and a port group) to VH1.
    vSwitch = VirtualSwitch()
    vSwitch.set_id('vSwitch-01')
    vSwitch.set_name('vSwitch-01')
    vSwitch.set_resourceManagerId('rmId')
    vSwitch.set_switchType('vSwitch')
    cost1 = Cost()
    cost1.set_value(100)
    cost1.set_units('USD')
    vSwitch.set_cost(cost1)
    portGroup = PortGroup()
    portGroup.set_id('pg-01')
    portGroup.set_name('pg-01')
    portGroup.set_resourceManagerId('rmId')
    portGroup.set_type('portgroup_type')
    portGroup.set_cost(cost1)
    vSwitch.add_portGroups(portGroup)
    vmhost.add_virtualSwitches(vSwitch)
    vmhost.add_portGroups(portGroup)
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    # A second host, so the volume below has mount points on two hosts.
    vmhost2 = VmHost()
    vmhost2.set_id('VH2')
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost2)
    storage = StorageVolume()
    storage.set_id('sv-01')
    storage.set_name('storage-01')
    storage.set_resourceManagerId('rmId')
    storage.set_size(1234)
    storage.set_free(2345)
    storage.set_vmfsVolume(True)
    storage.set_shared(True)
    storage.set_assignedServerCount(1)
    storage.set_volumeType('VMFS')
    storage.set_volumeId('101')
    hostMount1 = HostMountPoint()
    hostMount1.set_path('test_path1')
    hostMount1.set_vmHostId('VH1')
    storage.add_mountPoints(hostMount1)
    hostMount2 = HostMountPoint()
    hostMount2.set_path('test_path2')
    hostMount2.set_vmHostId('VH2')
    storage.add_mountPoints(hostMount2)
    healthnmon_db_api.storage_volume_save(get_admin_context(), storage)
    # A VM hosted on the host that will be deleted.
    vm = Vm()
    vm.set_id('vm-01')
    vm.set_name('vm-01')
    vm.set_vmHostId('VH1')
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    # Sanity check: the host exists before deletion.
    vmhosts = \
        healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                             [vmhost_id])
    self.assertFalse(vmhosts is None,
                     'host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'host get by id returned invalid number of list'
                    )
    healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(),
                                            [vmhost_id])
    vmhosts = \
        healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                             [vmhost_id])
    self.assertTrue(vmhosts is None or len(vmhosts) == 0,
                    'host not deleted')