def test_vm_host_get_all_for_vm(self):
    """Verify vm_host_get_all lists a saved host together with the ids
    of the VMs placed on it, and that the VM id list is emptied once
    the VM is deleted.

    Fixes: replaces the deprecated ``assert_`` alias with ``assertTrue``
    and uses ``assertEqual`` for equality checks so failures report the
    actual values.
    """
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

    vm = Vm()
    vm.id = 'VM11'
    vm.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), vm)

    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertEqual(vmhosts[0].id, host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assertTrue(vmids is not None)
    self.assertEqual(len(vmids), 1)
    self.assertTrue(vm.id in vmids)

    # Deleting the VM must remove its id from the host's VM id list.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm.id])
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertEqual(vmhosts[0].id, host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assertTrue((vmids is None) or (len(vmids) == 0))
def test_vm_host_get_all_for_vm(self):
    """Verify vm_host_get_all lists a saved host together with the ids
    of the VMs placed on it, and that the VM id list is emptied once
    the VM is deleted.

    Fixes: replaces the deprecated ``assert_`` alias with ``assertTrue``
    and uses ``assertEqual`` for equality checks so failures report the
    actual values.
    """
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

    vm = Vm()
    vm.id = 'VM11'
    vm.set_vmHostId(host_id)
    healthnmon_db_api.vm_save(get_admin_context(), vm)

    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertEqual(vmhosts[0].id, host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assertTrue(vmids is not None)
    self.assertEqual(len(vmids), 1)
    self.assertTrue(vm.id in vmids)

    # Deleting the VM must remove its id from the host's VM id list.
    healthnmon_db_api.vm_delete_by_ids(get_admin_context(), [vm.id])
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertEqual(vmhosts[0].id, host_id)
    vmids = vmhosts[0].get_virtualMachineIds()
    self.assertTrue((vmids is None) or (len(vmids) == 0))
def _createCache(self):
    """Stub the healthnmon DB getters so the inventory cache is seeded
    with two hosts, two VMs, one storage pool and one subnet.
    """
    self.mox.StubOutWithMock(api, "vm_host_get_all")
    host_a = VmHost()
    host_a.set_id("vmhost1")
    host_b = VmHost()
    host_b.set_id("vmhost2")
    vm_a = Vm()
    vm_a.set_id("vm1")
    vm_a.set_powerState(Constants.VM_POWER_STATES[1])
    vm_a.set_vmHostId("vmhost1")
    vm_b = Vm()
    vm_b.set_id("vm2")
    vm_b.set_powerState(Constants.VM_POWER_STATES[1])
    vm_b.set_vmHostId("vmhost2")
    host_a.set_virtualMachineIds(["vm1", "vm2"])
    pool = StorageVolume()
    pool.set_id("stpool1")
    network = Subnet()
    network.set_id("net1")
    # Record the expected DB-api calls in the same order the cache
    # initialisation issues them.
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([host_a, host_b])
    self.mox.StubOutWithMock(api, "vm_get_all")
    api.vm_get_all(mox.IgnoreArg()).AndReturn([vm_a, vm_b])
    self.mox.StubOutWithMock(api, "storage_volume_get_all")
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([pool])
    self.mox.StubOutWithMock(api, "subnet_get_all")
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([network])
def test_vm_host_get_all_for_sv(self):
    """Verify vm_host_get_all lists a saved host together with the ids
    of the storage volumes mounted on it, and that the id list is
    emptied once the volume is deleted.

    Fixes: replaces the deprecated ``assert_`` alias with ``assertTrue``
    and uses ``assertEqual`` for equality checks.
    """
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId(host_id)
    mntPnt.set_path('/path')
    volume = StorageVolume()
    volume.set_id('SV11')
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)

    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertEqual(vmhosts[0].id, host_id)
    svlist = vmhosts[0].get_storageVolumeIds()
    self.assertTrue(svlist is not None)
    self.assertEqual(len(svlist), 1)
    self.assertTrue(volume.get_id() in svlist)

    # Deleting the volume must remove its id from the host.
    healthnmon_db_api.storage_volume_delete_by_ids(
        get_admin_context(), [volume.get_id()])
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertEqual(vmhosts[0].id, host_id)
    svids = vmhosts[0].get_storageVolumeIds()
    self.assertTrue((svids is None) or (len(svids) == 0))
def _createCache(self):
    """Stub the healthnmon DB getters so the inventory cache is seeded
    with two hosts, two VMs, one storage pool and one subnet.
    """
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    first_host = VmHost()
    first_host.set_id('vmhost1')
    second_host = VmHost()
    second_host.set_id('vmhost2')
    first_vm = Vm()
    first_vm.set_id('vm1')
    first_vm.set_powerState(Constants.VM_POWER_STATES[1])
    first_vm.set_vmHostId('vmhost1')
    second_vm = Vm()
    second_vm.set_id('vm2')
    second_vm.set_powerState(Constants.VM_POWER_STATES[1])
    second_vm.set_vmHostId('vmhost2')
    first_host.set_virtualMachineIds(['vm1', 'vm2'])
    pool = StorageVolume()
    pool.set_id('stpool1')
    net = Subnet()
    net.set_id('net1')
    # Record the expected DB-api calls in the order the cache
    # initialisation issues them.
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn(
        [first_host, second_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([first_vm, second_vm])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([pool])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([net])
def test_vm_host_get_all_for_sv(self):
    """Verify vm_host_get_all lists a saved host together with the ids
    of the storage volumes mounted on it, and that the id list is
    emptied once the volume is deleted.

    Fixes: replaces the deprecated ``assert_`` alias with ``assertTrue``
    and uses ``assertEqual`` for equality checks.
    """
    host_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = host_id
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId(host_id)
    mntPnt.set_path('/path')
    volume = StorageVolume()
    volume.set_id('SV11')
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)

    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None,
                     'Host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'Host get by id returned invalid number of list')
    self.assertEqual(vmhosts[0].id, host_id)
    svlist = vmhosts[0].get_storageVolumeIds()
    self.assertTrue(svlist is not None)
    self.assertEqual(len(svlist), 1)
    self.assertTrue(volume.get_id() in svlist)

    # Deleting the volume must remove its id from the host.
    healthnmon_db_api.storage_volume_delete_by_ids(
        get_admin_context(), [volume.get_id()])
    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertEqual(vmhosts[0].id, host_id)
    svids = vmhosts[0].get_storageVolumeIds()
    self.assertTrue((svids is None) or (len(svids) == 0))
def test_host_removed_event(self):
    """When nova reports no compute nodes while a host is still in the
    inventory, _refresh_from_db must drop the host and emit exactly one
    HOST_REMOVED notification.

    Fixes: replaces the deprecated ``assertEquals`` alias with
    ``assertEqual``.
    """
    self.__mock_service_get_all_by_topic()
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    # Record the DB-api calls _refresh_from_db is expected to make.
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()

    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)

    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)

    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_HOST_REMOVED)
    self.assertEqual(msg['event_type'],
                     event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEqual(payload['entity_type'], 'VmHost')
    self.assertEqual(payload['entity_id'], deleted_host.id)
def test_host_removed_event(self):
    """When nova reports no compute nodes while a host is still in the
    inventory, _refresh_from_db must drop the host and emit exactly one
    HOST_REMOVED notification.

    Fixes: replaces the deprecated ``assertEquals`` alias with
    ``assertEqual``.
    """
    self.__mock_service_get_all_by_topic()
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    # Record the DB-api calls _refresh_from_db is expected to make.
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager,
                             'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()

    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)

    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)

    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEqual(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_HOST_REMOVED)
    self.assertEqual(msg['event_type'],
                     event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEqual(payload['entity_type'], 'VmHost')
    self.assertEqual(payload['entity_id'], deleted_host.id)
def test_host_removed_event_none_host(self):
    """Host removal must still work when the cached VmHost entry for
    the deleted id is None.

    Fixes: replaces the deprecated ``assertEquals`` alias with
    ``assertEqual``.
    """
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    # Record the DB-api calls _refresh_from_db is expected to make.
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()

    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    # Simulate a cache entry whose VmHost object is missing.
    InventoryCacheManager.get_inventory_cache(
    )[Constants.VmHost][deleted_host.get_id()] = None

    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)

    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
def test_host_removed_event_none_host(self):
    """Host removal must still work when the cached VmHost entry for
    the deleted id is None.

    Fixes: replaces the deprecated ``assertEquals`` alias with
    ``assertEqual``.
    """
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    # Record the DB-api calls _refresh_from_db is expected to make.
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(InventoryCacheManager,
                             'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()

    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    # Simulate a cache entry whose VmHost object is missing.
    InventoryCacheManager.get_inventory_cache()[Constants.VmHost][
        deleted_host.get_id()] = None

    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)

    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
def _initCache(self):
    """Populate the inventory cache from the database.

    Registers every nova compute node with the inventory, then loads
    all hosts, VMs, storage volumes and subnets from the healthnmon DB
    and pushes each set into the cache.
    """
    LOG.info(_('Entering into initCache'))
    # Register each compute node reported by nova.
    for node in db.compute_node_get_all(get_admin_context()):
        node_service = node['service']
        self._add_compute_to_inventory(node['hypervisor_type'],
                                       str(node['id']),
                                       node_service['host'])
    hosts = api.vm_host_get_all(get_admin_context())
    vms = api.vm_get_all(get_admin_context())
    volumes = api.storage_volume_get_all(get_admin_context())
    subnets = api.subnet_get_all(get_admin_context())
    for entities in (hosts, vms, volumes, subnets):
        self._updateInventory(entities)
    LOG.info(_('Hosts obtained from db: %s') % str(len(hosts)))
    LOG.info(_('Vms obtained from db: %s') % str(len(vms)))
    LOG.info(_('Storage volumes obtained from db: %s')
             % str(len(volumes)))
    LOG.info(_('Subnets obtained from db: %s') % str(len(subnets)))
    LOG.info(_('Completed the initCache method'))
def _createCache(self):
    """Stub the DB getters so the inventory cache is seeded with one
    object of each inventory type.
    """
    seeded_host = VmHost()
    seeded_host.set_id('vmhost1')
    seeded_vm = Vm()
    seeded_vm.set_id('vm1')
    seeded_pool = StorageVolume()
    seeded_pool.set_id('stpool1')
    seeded_net = Subnet()
    seeded_net.set_id('bridge0')
    # Record the expected DB-api calls in initialisation order.
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([seeded_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([seeded_vm])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([seeded_pool])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([seeded_net])
def test_vm_host_save_none(self):
    """Saving None via vm_host_save must be a no-op: the previously
    stored host remains the only row.
    """
    def _assert_single_host():
        # The DB must hold exactly the one host saved above.
        hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
        self.assertFalse(hosts is None,
                         'vm_host_get_all returned a None')
        self.assertTrue(
            len(hosts) == 1,
            'vm_host_get_all does not returned expected number of hosts')

    host = VmHost()
    host.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), host)
    _assert_single_host()
    # Now try to put a None object in the db ...
    healthnmon_db_api.vm_host_save(get_admin_context(), None)
    # ... and verify the stored data is unchanged.
    _assert_single_host()
def test_vm_host_delete_none(self):
    """Passing None to vm_host_delete_by_ids must be a no-op: the
    previously stored host remains intact.
    """
    def _assert_single_host():
        # The DB must hold exactly the one host saved above.
        hosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
        self.assertFalse(hosts is None,
                         'vm_host_get_all returned a None')
        self.assertTrue(
            len(hosts) == 1,
            'vm_host_get_all does not returned expected number of hosts')

    host = VmHost()
    host.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), host)
    _assert_single_host()
    # Now call the delete api by passing the id as None ...
    healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(), None)
    # ... and verify the stored host is untouched.
    _assert_single_host()
def test_load_compute_inventory(self):
    """InventoryManager() must load the compute nodes reported by nova
    into the compute inventory.

    Fixes: the original final assertion was
    ``assertTrue(len(compute_key_lst), 2)`` -- the ``2`` was passed as
    the failure *message*, so only the truthiness of the length was
    checked. The non-empty check is now explicit.
    """
    compute_service = dict(host='host2')
    compute = dict(id='compute2', hypervisor_type='fake',
                   service=compute_service)
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([compute])
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.ReplayAll()
    InventoryManager()
    compute_key_lst = InventoryCacheManager.get_all_compute_inventory(
    ).keys()
    # NOTE(review): the literal 2 in the original call looks like an
    # intended expected count (assertEqual) -- confirm the expected
    # number of cached computes before tightening this assertion.
    self.assertTrue(len(compute_key_lst) > 0)
def test_vm_host_delete_none(self):
    """Passing None to vm_host_delete_by_ids must leave the stored
    host intact.
    """
    saved_host = VmHost()
    saved_host.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), saved_host)
    before = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(before is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(before) == 1,
        'vm_host_get_all does not returned expected number of hosts')
    # Delete with a None id list -- expected to be ignored.
    healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(), None)
    # The host must still be retrievable afterwards.
    after = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(after is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(after) == 1,
        'vm_host_get_all does not returned expected number of hosts')
def test_vm_host_save_none(self):
    """Saving a None object via vm_host_save must leave the stored
    host intact.
    """
    saved_host = VmHost()
    saved_host.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), saved_host)
    before = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(before is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(before) == 1,
        'vm_host_get_all does not returned expected number of hosts')
    # Save a None object -- expected to be ignored.
    healthnmon_db_api.vm_host_save(get_admin_context(), None)
    # The previously saved host must be unchanged afterwards.
    after = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(after is None, 'vm_host_get_all returned a None')
    self.assertTrue(
        len(after) == 1,
        'vm_host_get_all does not returned expected number of hosts')
def _initCache(self):
    """Populate the inventory cache with all hosts, VMs, storage
    volumes and subnets read from the healthnmon DB.

    Fixes: the summary log lines formatted e.g.
    ``_("Hosts obtained from db ") % vmhosts`` -- the template has no
    conversion specifier, so the ``%`` operator raised
    ``TypeError: not all arguments converted`` on every call. The
    messages now use ``%s`` with the entity counts, and the previously
    missing subnet count is logged as well.
    """
    # Read from DB all the vmHost objects and populate
    # the cache for each IP if cache is empty
    LOG.info(_(" Entering into initCache"))
    vmhosts = api.vm_host_get_all(get_admin_context())
    vms = api.vm_get_all(get_admin_context())
    storageVolumes = api.storage_volume_get_all(get_admin_context())
    subNets = api.subnet_get_all(get_admin_context())
    self._updateInventory(vmhosts)
    self._updateInventory(vms)
    self._updateInventory(storageVolumes)
    self._updateInventory(subNets)
    LOG.info(_("Hosts obtained from db: %s") % str(len(vmhosts)))
    LOG.info(_("Vms obtained from db: %s") % str(len(vms)))
    LOG.info(_("Storage volumes obtained from db: %s")
             % str(len(storageVolumes)))
    LOG.info(_("Subnets obtained from db: %s") % str(len(subNets)))
    LOG.info(_("Completed the initCache method"))
def _initCache(self):
    """Populate the inventory cache with all hosts, VMs, storage
    volumes and subnets read from the healthnmon DB.

    Fixes: the summary log lines formatted e.g.
    ``_('Hosts obtained from db ') % vmhosts`` -- the template has no
    conversion specifier, so the ``%`` operator raised
    ``TypeError: not all arguments converted`` on every call. The
    messages now use ``%s`` with the entity counts, and the previously
    missing subnet count is logged as well.
    """
    # Read from DB all the vmHost objects and populate
    # the cache for each IP if cache is empty
    LOG.info(_(' Entering into initCache'))
    vmhosts = api.vm_host_get_all(get_admin_context())
    vms = api.vm_get_all(get_admin_context())
    storageVolumes = api.storage_volume_get_all(get_admin_context())
    subNets = api.subnet_get_all(get_admin_context())
    self._updateInventory(vmhosts)
    self._updateInventory(vms)
    self._updateInventory(storageVolumes)
    self._updateInventory(subNets)
    LOG.info(_('Hosts obtained from db: %s') % str(len(vmhosts)))
    LOG.info(_('Vms obtained from db: %s') % str(len(vms)))
    LOG.info(_('Storage volumes obtained from db: %s')
             % str(len(storageVolumes)))
    LOG.info(_('Subnets obtained from db: %s') % str(len(subNets)))
    LOG.info(_('Completed the initCache method'))
def test_vm_host_get_all(self):
    """Inserts more than one host with vms and storage volumes.
    Also validates the data retrieved from the vmhost, vm,
    storage volumes.

    Fixes: replaces the deprecated ``assertEquals`` alias with
    ``assertEqual`` and uses ``assertEqual`` instead of
    ``assertTrue(x == y)`` so failures report the mismatching values.
    """
    vmhost = VmHost()
    vmhost.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vmhost = VmHost()
    vmhost.id = 'VH2-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

    vm = Vm()
    vm.id = 'VM1-id'
    vm.set_vmHostId('VH1-id')
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id(vm.id)
    vmGlobalSettings.set_autoStartAction('autoStartAction')
    vmGlobalSettings.set_autoStopAction('autoStopAction')
    vm.set_vmGlobalSettings(vmGlobalSettings)
    healthnmon_db_api.vm_save(get_admin_context(), vm)

    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId('VH1-id')
    mntPnt.set_path('/path')
    volume = StorageVolume()
    sv_id = 'SV1-id'
    volume.set_id(sv_id)
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)

    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None, 'vm_host_get_all returned a None')
    self.assertEqual(
        len(vmhosts), 2,
        'vm_host_get_all does not returned expected number of hosts')
    self.assertEqual(vmhosts[0].get_id(), 'VH1-id',
                     "VMHost id is not same")
    self.assertEqual(vmhosts[1].get_id(), 'VH2-id',
                     "VMHost id is not same")

    vmlist = vmhosts[0].get_virtualMachineIds()
    self.assertFalse(vmlist is None,
                     "virtual machines from the host returned None")
    self.assertEqual(
        len(vmlist), 1,
        "length of virtual machines list is not returned as expected")
    self.assertTrue(vm.id in vmlist, "VmId is not in host")

    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(),
                                          ['VM1-id'])
    self.assertTrue(vms is not None)
    self.assertEqual(len(vms), 1)
    vm = vms[0]
    self.assertEqual(vm.get_id(), 'VM1-id', "VM id is not same")
    vmGlobalSets = vm.get_vmGlobalSettings()
    self.assertTrue(vmGlobalSets is not None)
    self.assertEqual(vmGlobalSets.get_id(), 'VM1-id',
                     "VM id is not same")
    self.assertEqual(vmGlobalSets.get_autoStartAction(),
                     'autoStartAction',
                     "autoStartAction is not same")
    self.assertEqual(vmGlobalSets.get_autoStopAction(),
                     'autoStopAction',
                     "autoStopAction is not same")

    svlist = vmhosts[0].get_storageVolumeIds()
    self.assertFalse(svlist is None,
                     "Storage Volumes from the host returned None")
    self.assertTrue(
        len(svlist) >= 1,
        "length of storage volumes list is not returned as expected")
    self.assertTrue(sv_id in svlist, "Storage Volume Id is not host")

    storagevolumes = healthnmon_db_api.storage_volume_get_by_ids(
        get_admin_context(), ['SV1-id'])
    self.assertFalse(storagevolumes is None,
                     'Storage volume get by id returned a none list')
    self.assertTrue(
        len(storagevolumes) > 0,
        'Storage volume get by id returned invalid number of list')
    self.assertEqual(storagevolumes[0].id, 'SV1-id',
                     "Storage volume id is not same")
    hostMountPoints = storagevolumes[0].get_mountPoints()
    self.assertEqual(hostMountPoints[0].get_path(), '/path',
                     "Host mount point path is not same")
    self.assertEqual(hostMountPoints[0].get_vmHostId(), 'VH1-id',
                     "VmHost id is not same for storage volumes")
def test_vm_host_get_all(self):
    """Inserts more than one host with vms and storage volumes.
    Also validates the data retrieved from the vmhost, vm,
    storage volumes.

    Fixes: replaces the deprecated ``assertEquals`` alias with
    ``assertEqual`` and uses ``assertEqual`` instead of
    ``assertTrue(x == y)`` so failures report the mismatching values.
    """
    vmhost = VmHost()
    vmhost.id = 'VH1-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vmhost = VmHost()
    vmhost.id = 'VH2-id'
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

    vm = Vm()
    vm.id = 'VM1-id'
    vm.set_vmHostId('VH1-id')
    vmGlobalSettings = VmGlobalSettings()
    vmGlobalSettings.set_id(vm.id)
    vmGlobalSettings.set_autoStartAction('autoStartAction')
    vmGlobalSettings.set_autoStopAction('autoStopAction')
    vm.set_vmGlobalSettings(vmGlobalSettings)
    healthnmon_db_api.vm_save(get_admin_context(), vm)

    mntPnt = HostMountPoint()
    mntPnt.set_vmHostId('VH1-id')
    mntPnt.set_path('/path')
    volume = StorageVolume()
    sv_id = 'SV1-id'
    volume.set_id(sv_id)
    volume.add_mountPoints(mntPnt)
    healthnmon_db_api.storage_volume_save(get_admin_context(), volume)

    vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
    self.assertFalse(vmhosts is None, 'vm_host_get_all returned a None')
    self.assertEqual(
        len(vmhosts), 2,
        'vm_host_get_all does not returned expected number of hosts')
    self.assertEqual(vmhosts[0].get_id(), 'VH1-id',
                     "VMHost id is not same")
    self.assertEqual(vmhosts[1].get_id(), 'VH2-id',
                     "VMHost id is not same")

    vmlist = vmhosts[0].get_virtualMachineIds()
    self.assertFalse(vmlist is None,
                     "virtual machines from the host returned None")
    self.assertEqual(
        len(vmlist), 1,
        "length of virtual machines list is not returned as expected")
    self.assertTrue(vm.id in vmlist, "VmId is not in host")

    vms = healthnmon_db_api.vm_get_by_ids(get_admin_context(),
                                          ['VM1-id'])
    self.assertTrue(vms is not None)
    self.assertEqual(len(vms), 1)
    vm = vms[0]
    self.assertEqual(vm.get_id(), 'VM1-id', "VM id is not same")
    vmGlobalSets = vm.get_vmGlobalSettings()
    self.assertTrue(vmGlobalSets is not None)
    self.assertEqual(vmGlobalSets.get_id(), 'VM1-id',
                     "VM id is not same")
    self.assertEqual(vmGlobalSets.get_autoStartAction(),
                     'autoStartAction',
                     "autoStartAction is not same")
    self.assertEqual(vmGlobalSets.get_autoStopAction(),
                     'autoStopAction',
                     "autoStopAction is not same")

    svlist = vmhosts[0].get_storageVolumeIds()
    self.assertFalse(svlist is None,
                     "Storage Volumes from the host returned None")
    self.assertTrue(
        len(svlist) >= 1,
        "length of storage volumes list is not returned as expected")
    self.assertTrue(sv_id in svlist, "Storage Volume Id is not host")

    storagevolumes = healthnmon_db_api.storage_volume_get_by_ids(
        get_admin_context(), ['SV1-id'])
    self.assertFalse(storagevolumes is None,
                     'Storage volume get by id returned a none list')
    self.assertTrue(
        len(storagevolumes) > 0,
        'Storage volume get by id returned invalid number of list')
    self.assertEqual(storagevolumes[0].id, 'SV1-id',
                     "Storage volume id is not same")
    hostMountPoints = storagevolumes[0].get_mountPoints()
    self.assertEqual(hostMountPoints[0].get_path(), '/path',
                     "Host mount point path is not same")
    self.assertEqual(hostMountPoints[0].get_vmHostId(), 'VH1-id',
                     "VmHost id is not same for storage volumes")