def testProcessUpdates_compute_stopped_exception(self):
    """processUpdates must still return None when the host-update
    notification raises, i.e. the exception is swallowed internally."""
    cached_host = VmHost()
    cached_host.set_id('1')
    cached_host.set_connectionState(Constants.VMHOST_CONNECTED)
    InventoryCacheManager.update_object_in_cache('1', cached_host)
    self.mock.StubOutWithMock(api, 'vm_host_save')
    api.vm_host_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mock.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        self.libvirtVmHost.compute_id,
        Constants.VmHost).AndReturn(fake.get_connection())
    fake_computes = [{'id': '1',
                      'service': {'created_at': 'created',
                                  'updated_at': 'updated'}}]
    self.mock.StubOutWithMock(novadb, 'compute_node_get_all')
    novadb.compute_node_get_all(mox.IgnoreArg()).AndReturn(fake_computes)
    self.mock.StubOutWithMock(hnm_utils, 'is_service_alive')
    hnm_utils.is_service_alive(
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False)
    # The notification blows up; processUpdates must not propagate it.
    self.mock.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(
        mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(Exception())
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVmHost.processUpdates(), None)
    self.mock.stubs.UnsetAll()
def _createCache(self):
    """Stub the inventory api getters to return two hosts, two running
    VMs, one storage pool and one subnet."""
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    host_a = VmHost()
    host_a.set_id('vmhost1')
    host_b = VmHost()
    host_b.set_id('vmhost2')
    first_vm = Vm()
    first_vm.set_id('vm1')
    first_vm.set_powerState(Constants.VM_POWER_STATES[1])
    first_vm.set_vmHostId('vmhost1')
    second_vm = Vm()
    second_vm.set_id('vm2')
    second_vm.set_powerState(Constants.VM_POWER_STATES[1])
    second_vm.set_vmHostId('vmhost2')
    host_a.set_virtualMachineIds(['vm1', 'vm2'])
    pool = StorageVolume()
    pool.set_id('stpool1')
    net = Subnet()
    net.set_id('net1')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([host_a, host_b])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([first_vm, second_vm])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([pool])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([net])
def _createCache(self):
    """Prime the mocked inventory api with a two-host / two-vm fixture
    plus a storage pool and a subnet."""
    self.mox.StubOutWithMock(api, "vm_host_get_all")
    hosts = []
    for host_id in ("vmhost1", "vmhost2"):
        host = VmHost()
        host.set_id(host_id)
        hosts.append(host)
    vms = []
    for vm_id, owner in (("vm1", "vmhost1"), ("vm2", "vmhost2")):
        vm = Vm()
        vm.set_id(vm_id)
        vm.set_powerState(Constants.VM_POWER_STATES[1])
        vm.set_vmHostId(owner)
        vms.append(vm)
    hosts[0].set_virtualMachineIds(["vm1", "vm2"])
    pool = StorageVolume()
    pool.set_id("stpool1")
    net = Subnet()
    net.set_id("net1")
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn(hosts)
    self.mox.StubOutWithMock(api, "vm_get_all")
    api.vm_get_all(mox.IgnoreArg()).AndReturn(vms)
    self.mox.StubOutWithMock(api, "storage_volume_get_all")
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([pool])
    self.mox.StubOutWithMock(api, "subnet_get_all")
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([net])
def skiptestProcessUpdates_network_stopped(self):
    """Skipped test: when the network service heartbeat is stale the
    cached host must be flipped to 'Disconnected'."""
    cached_host = VmHost()
    cached_host.set_id('1')
    cached_host.set_connectionState(Constants.VMHOST_CONNECTED)
    InventoryCacheManager.update_object_in_cache('1', cached_host)
    self.mock.StubOutWithMock(
        self.libvirtVmHost, '_get_compute_running_status')
    self.libvirtVmHost._get_compute_running_status().AndReturn(
        (True, 'host'))
    self.mock.StubOutWithMock(api, 'vm_host_save')
    api.vm_host_save(
        mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    fake_networks = {'id': '1',
                     'created_at': 'created',
                     'updated_at': 'updated'}
    self.mock.StubOutWithMock(novadb, 'service_get_by_host_and_topic')
    novadb.service_get_by_host_and_topic(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn(fake_networks)
    # Stale heartbeat -> service considered dead.
    self.mock.StubOutWithMock(hnm_utils, 'is_service_alive')
    hnm_utils.is_service_alive(
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False)
    self.mock.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None)
    self.mock.ReplayAll()
    self.assertEquals(self.libvirtVmHost.processUpdates(), None)
    self.assertEquals(
        self.libvirtVmHost.cachedvmHost.get_connectionState(),
        'Disconnected')
    self.mock.stubs.UnsetAll()
def test_host_removed_event(self):
    """A host present in the DB but missing from nova's compute nodes
    is purged from the inventory and a HOST_REMOVED event is emitted."""
    self.__mock_service_get_all_by_topic()
    removed_host = VmHost()
    removed_host.set_id('compute1')
    removed_host.set_name('compute1')
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([removed_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    # nova reports no compute nodes -> the cached host must be removed.
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()
    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, removed_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_HOST_REMOVED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'VmHost')
    self.assertEquals(payload['entity_id'], removed_host.id)
def test_host_removed_event(self):
    """Verify that a vanished compute node triggers inventory removal
    and a correctly-shaped HOST_REMOVED notification."""
    self.__mock_service_get_all_by_topic()
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()
    service = {'host': 'host1'}
    compute = {'id': 'compute1',
               'hypervisor_type': 'fake',
               'service': service}
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=service['host'],
        rmUserName='******',
        rmPassword='******')
    inventory = InventoryCacheManager.get_all_compute_inventory()
    inventory.clear()
    inventory['compute1'] = ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], notifier_api.INFO)
    event_type = event_metadata.get_EventMetaData(
        event_metadata.EVENT_TYPE_HOST_REMOVED)
    self.assertEquals(msg['event_type'],
                      event_type.get_event_fully_qal_name())
    payload = msg['payload']
    self.assertEquals(payload['entity_type'], 'VmHost')
    self.assertEquals(payload['entity_id'], deleted_host.id)
def get_single_host(self):
    """Return a one-element list containing a canned VmHost fixture
    with two vm ids and two storage volume ids attached."""
    host = VmHost()
    host.set_id('host-01')
    host.set_name('host-01')
    for vm_id in ('vm-01', 'vm-02'):
        host.add_virtualMachineIds(vm_id)
    for volume_id in ('storage-01', 'storage-02'):
        host.add_storageVolumeIds(volume_id)
    return [host]
def get_limited_list(self, num):
    """Build *num* VmHost fixtures named host-1 .. host-num.

    NOTE(review): each host adds the identical vm and storage id
    twice — presumably deliberate (fixtures with duplicates), but
    confirm against callers before changing.
    """
    hosts = []
    for idx in range(1, num + 1):
        suffix = str(idx)
        host = VmHost()
        host.set_id('host-' + suffix)
        host.set_name('host-' + suffix)
        host.add_virtualMachineIds('vm-' + suffix)
        host.add_virtualMachineIds('vm-' + suffix)
        host.add_storageVolumeIds('storage-' + suffix)
        host.add_storageVolumeIds('storage-' + suffix)
        hosts.append(host)
    return hosts
def test_host_removed_event_none_host(self):
    """Host removal still completes when the cached host entry for the
    id has been nulled out."""
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()
    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=compute_service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    # Simulate a cache entry whose host object has been dropped.
    InventoryCacheManager.get_inventory_cache()[
        Constants.VmHost][deleted_host.get_id()] = None
    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
def test_host_removed_event_none_host(self):
    """Removing a host whose cached object is None must still purge the
    compute inventory and emit exactly one notification."""
    gone_host = VmHost()
    gone_host.set_id('compute1')
    gone_host.set_name('compute1')
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([gone_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1', Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()
    service = {'host': 'host1'}
    compute = {'id': 'compute1',
               'hypervisor_type': 'fake',
               'service': service}
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=service['host'],
        rmUserName='******',
        rmPassword='******')
    inventory = InventoryCacheManager.get_all_compute_inventory()
    inventory.clear()
    inventory['compute1'] = ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, gone_host)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    # Null out the cached host object before the refresh.
    InventoryCacheManager.get_inventory_cache()[
        Constants.VmHost][gone_host.get_id()] = None
    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
def test_refresh_from_db_delete_host(self):
    """_refresh_from_db drops cached hosts that no longer appear in the
    nova compute-node table and leaves the inventory empty."""
    self._createInvCache()
    InventoryCacheManager.get_all_compute_inventory().clear()
    compute = []
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(compute)
    im = self.inv_manager
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    compute = _create_Compute(compute_id='vmhost1')
    service = compute['service']
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=service['host'],
        rmUserName='******',
        rmPassword='******')
    InventoryCacheManager.get_all_compute_inventory()['vmhost1'] = \
        ComputeInventory(rm_context)
    host = VmHost()
    host.set_id('vmhost1')
    host.set_name('vmhost1')
    InventoryCacheManager.get_all_compute_inventory()[
        'vmhost1'].update_compute_info(rm_context, host)
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    im._refresh_from_db(None)
    self.mox.VerifyAll()
    self.mox.stubs.UnsetAll()
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertTrue(
        InventoryCacheManager.get_all_compute_inventory().get('compute1')
        is None)
    self.mox.UnsetStubs()
def _createCache(self):
    """Stub the inventory api getters with one host, one vm, one
    storage pool and one subnet."""
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    host = VmHost()
    host.set_id('vmhost1')
    vm = Vm()
    vm.set_id('vm1')
    pool = StorageVolume()
    pool.set_id('stpool1')
    bridge = Subnet()
    bridge.set_id('bridge0')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([vm])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([pool])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([bridge])
def test_refresh_from_db_delete_host(self):
    """A cached host absent from nova's compute-node list is deleted by
    _refresh_from_db, leaving the compute inventory empty."""
    self._createInvCache()
    InventoryCacheManager.get_all_compute_inventory().clear()
    nodes = []
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(nodes)
    manager = self.inv_manager
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    compute = _create_Compute(compute_id='vmhost1')
    service = compute['service']
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute['hypervisor_type'],
        rmIpAddress=service['host'],
        rmUserName='******',
        rmPassword='******')
    inventory = InventoryCacheManager.get_all_compute_inventory()
    inventory['vmhost1'] = ComputeInventory(rm_context)
    host = VmHost()
    host.set_id('vmhost1')
    host.set_name('vmhost1')
    inventory['vmhost1'].update_compute_info(rm_context, host)
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(event_api, 'notify_host_update')
    event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    manager._refresh_from_db(None)
    self.mox.VerifyAll()
    self.mox.stubs.UnsetAll()
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertTrue(
        InventoryCacheManager.get_all_compute_inventory().get('compute1')
        is None)
    self.mox.UnsetStubs()
def createInvCache(self, vmrunning, hostconnection='Connected'):
    """Seed the inventory cache with one host and one VM whose power
    state follows *vmrunning*."""
    host = VmHost()
    host.set_id(self.vmhost_id)
    host.set_connectionState(hostconnection)
    vm = Vm()
    vm.set_id(self.vm_id)
    state_index = 1 if vmrunning else 0
    vm.set_powerState(Constants.VM_POWER_STATES[state_index])
    vm.set_vmHostId(self.vmhost_id)
    host.set_virtualMachineIds([self.vm_id])
    host.set_processorSpeedMhz(2100)
    host.set_processorCoresCount(4)
    host.set_processorCount('2')
    host.set_memorySize(2097152)
    host.set_memoryConsumed(2097152)
    InventoryCacheManager.update_object_in_cache(self.vmhost_id, host)
    InventoryCacheManager.update_object_in_cache(self.vm_id, vm)
def test_refresh_from_db_delete_host(self):
    """After a refresh against an empty compute-node table, the
    previously cached host must be gone from the inventory."""
    self._createInvCache()
    InventoryCacheManager.get_all_compute_inventory().clear()
    self.mox.StubOutWithMock(db, "compute_node_get_all")
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    im = self.inv_manager
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    compute = _create_Compute(compute_id="vmhost1")
    service = compute["service"]
    rm_context = rmcontext.ComputeRMContext(
        rmType=compute["hypervisor_type"],
        rmIpAddress=service["host"],
        rmUserName="******",
        rmPassword="******")
    InventoryCacheManager.get_all_compute_inventory()["vmhost1"] = \
        ComputeInventory(rm_context)
    cached_host = VmHost()
    cached_host.set_id("vmhost1")
    cached_host.set_name("vmhost1")
    InventoryCacheManager.get_all_compute_inventory()[
        "vmhost1"].update_compute_info(rm_context, cached_host)
    self.mox.StubOutWithMock(api, "vm_host_delete_by_ids")
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(event_api, "notify_host_update")
    event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    im._refresh_from_db(None)
    self.mox.VerifyAll()
    self.mox.stubs.UnsetAll()
    self.assertEquals(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertTrue(
        InventoryCacheManager.get_all_compute_inventory().get("compute1")
        is None)
    self.mox.UnsetStubs()
def createInvCache(self, vmrunning, hostconnection='Connected'):
    """Populate the inventory cache with a host/vm pair; the VM power
    state is POWER_STATES[1] when *vmrunning* else POWER_STATES[0]."""
    host = VmHost()
    host.set_id(self.vmhost_id)
    host.set_connectionState(hostconnection)
    vm = Vm()
    vm.set_id(self.vm_id)
    if vmrunning:
        vm.set_powerState(Constants.VM_POWER_STATES[1])
    else:
        vm.set_powerState(Constants.VM_POWER_STATES[0])
    vm.set_vmHostId(self.vmhost_id)
    host.set_virtualMachineIds([self.vm_id])
    host.set_processorSpeedMhz(2100)
    host.set_processorCoresCount(4)
    host.set_processorCount('2')
    host.set_memorySize(2097152)
    host.set_memoryConsumed(2097152)
    InventoryCacheManager.update_object_in_cache(self.vmhost_id, host)
    InventoryCacheManager.update_object_in_cache(self.vm_id, vm)
def test_timestamp_columns(self):
    """Verify createEpoch, lastModifiedEpoch and deletedEpoch stamping
    for a host and its nested virtual switches and port groups."""
    vmhost = VmHost()
    vmhost.set_id('VH1')
    virSw1 = VirtualSwitch()
    virSw1.set_id('VS1_VH1')
    portGrp1 = PortGroup()
    portGrp1.set_id('PG1_VH1')
    virSw1.add_portGroups(portGrp1)
    vmhost.add_virtualSwitches(virSw1)
    vmhost.add_portGroups(portGrp1)
    # createEpoch must fall inside the save window for host, switch, pg
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vmhost_queried.get_createEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after, virSw.get_createEpoch()))
        for pg in virSw.get_portGroups():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, pg.get_createEpoch()))
    # lastModifiedEpoch is set on re-save; createEpoch is preserved
    vmhost_modified = vmhost_queried
    test_utils.unset_timestamp_fields(vmhost_modified)
    vmhost_modified.set_name('changed_name')
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(vmhost_modified.get_createEpoch() ==
                 vmhost_queried.get_createEpoch())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after,
        vmhost_queried.get_lastModifiedEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after, virSw.get_lastModifiedEpoch()))
        for pg in virSw.get_portGroups():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, pg.get_lastModifiedEpoch()))
    # newly added switch/portgroup get createEpoch; old ones modified
    vmhost_modified = vmhost_queried
    test_utils.unset_timestamp_fields(vmhost_modified)
    virSw2 = VirtualSwitch()
    virSw2.set_id('VS2_VH1')
    portGrp2 = PortGroup()
    portGrp2.set_id('PG2_VH1')
    virSw2.add_portGroups(portGrp2)
    vmhost_modified.add_virtualSwitches(virSw2)
    vmhost_modified.add_portGroups(portGrp2)
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(vmhost_modified.get_createEpoch() ==
                 vmhost_queried.get_createEpoch())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after,
        vmhost_queried.get_lastModifiedEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        if virSw.get_id() == virSw2.get_id():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, virSw.get_createEpoch()))
        else:
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after,
                virSw.get_lastModifiedEpoch()))
        for pg in virSw.get_portGroups():
            if pg.get_id() == portGrp2.get_id():
                self.assert_(test_utils.is_timestamp_between(
                    epoch_before, epoch_after, pg.get_createEpoch()))
            else:
                self.assert_(test_utils.is_timestamp_between(
                    epoch_before, epoch_after,
                    pg.get_lastModifiedEpoch()))
    # deletedEpoch is stamped on host, switches and portgroups
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_delete_by_ids(
        self.admin_context, [vmhost_queried.get_id()])
    epoch_after = utils.get_current_epoch_ms()
    deleted_host = healthnmon_db_api.vm_host_get_all_by_filters(
        self.admin_context,
        {"id": vmhost_queried.get_id()}, None, None)[0]
    self.assertTrue(deleted_host.get_deleted())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, deleted_host.get_deletedEpoch()))
    deleted_switches = \
        healthnmon_db_api.virtual_switch_get_all_by_filters(
            self.admin_context,
            {"id": (virSw1.get_id(), virSw2.get_id())}, None, None)
    for deleted_switch in deleted_switches:
        self.assertTrue(deleted_switch.get_deleted())
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after,
            deleted_switch.get_deletedEpoch()))
        for deleted_portgrp in deleted_switch.get_portGroups():
            self.assertTrue(deleted_portgrp.get_deleted())
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after,
                deleted_portgrp.get_deletedEpoch()))
def test_timestamp_columns(self):
    """Check the lifecycle timestamp columns (createEpoch,
    lastModifiedEpoch, deletedEpoch) across save, update and delete."""
    vmhost = VmHost()
    vmhost.set_id('VH1')
    virSw1 = VirtualSwitch()
    virSw1.set_id('VS1_VH1')
    portGrp1 = PortGroup()
    portGrp1.set_id('PG1_VH1')
    virSw1.add_portGroups(portGrp1)
    vmhost.add_virtualSwitches(virSw1)
    vmhost.add_portGroups(portGrp1)
    # Initial save: createEpoch stamped within the save window
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vmhost_queried.get_createEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after, virSw.get_createEpoch()))
        for pg in virSw.get_portGroups():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, pg.get_createEpoch()))
    # Update: lastModifiedEpoch stamped, createEpoch unchanged
    vmhost_modified = vmhost_queried
    test_utils.unset_timestamp_fields(vmhost_modified)
    vmhost_modified.set_name('changed_name')
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(vmhost_modified.get_createEpoch() ==
                 vmhost_queried.get_createEpoch())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after,
        vmhost_queried.get_lastModifiedEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after, virSw.get_lastModifiedEpoch()))
        for pg in virSw.get_portGroups():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, pg.get_lastModifiedEpoch()))
    # Add a new switch/portgroup: they get createEpoch, the rest get
    # lastModifiedEpoch
    vmhost_modified = vmhost_queried
    test_utils.unset_timestamp_fields(vmhost_modified)
    virSw2 = VirtualSwitch()
    virSw2.set_id('VS2_VH1')
    portGrp2 = PortGroup()
    portGrp2.set_id('PG2_VH1')
    virSw2.add_portGroups(portGrp2)
    vmhost_modified.add_virtualSwitches(virSw2)
    vmhost_modified.add_portGroups(portGrp2)
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(vmhost_modified.get_createEpoch() ==
                 vmhost_queried.get_createEpoch())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after,
        vmhost_queried.get_lastModifiedEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        if virSw.get_id() == virSw2.get_id():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, virSw.get_createEpoch()))
        else:
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after,
                virSw.get_lastModifiedEpoch()))
        for pg in virSw.get_portGroups():
            if pg.get_id() == portGrp2.get_id():
                self.assert_(test_utils.is_timestamp_between(
                    epoch_before, epoch_after, pg.get_createEpoch()))
            else:
                self.assert_(test_utils.is_timestamp_between(
                    epoch_before, epoch_after,
                    pg.get_lastModifiedEpoch()))
    # Delete: deletedEpoch stamped on host, switches and portgroups
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_delete_by_ids(
        self.admin_context, [vmhost_queried.get_id()])
    epoch_after = utils.get_current_epoch_ms()
    deleted_host = healthnmon_db_api.vm_host_get_all_by_filters(
        self.admin_context,
        {"id": vmhost_queried.get_id()}, None, None)[0]
    self.assertTrue(deleted_host.get_deleted())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, deleted_host.get_deletedEpoch()))
    deleted_switches = \
        healthnmon_db_api.virtual_switch_get_all_by_filters(
            self.admin_context,
            {"id": (virSw1.get_id(), virSw2.get_id())}, None, None)
    for deleted_switch in deleted_switches:
        self.assertTrue(deleted_switch.get_deleted())
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after,
            deleted_switch.get_deletedEpoch()))
        for deleted_portgrp in deleted_switch.get_portGroups():
            self.assertTrue(deleted_portgrp.get_deleted())
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after,
                deleted_portgrp.get_deletedEpoch()))
def test_vm_host_delete(self):
    """Saving then deleting a host by id removes it from the results
    of vm_host_get_by_ids."""
    vmhost_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = vmhost_id
    vSwitch = VirtualSwitch()
    vSwitch.set_id('vSwitch-01')
    vSwitch.set_name('vSwitch-01')
    vSwitch.set_resourceManagerId('rmId')
    vSwitch.set_switchType('vSwitch')
    cost1 = Cost()
    cost1.set_value(100)
    cost1.set_units('USD')
    vSwitch.set_cost(cost1)
    portGroup = PortGroup()
    portGroup.set_id('pg-01')
    portGroup.set_name('pg-01')
    portGroup.set_resourceManagerId('rmId')
    portGroup.set_type('portgroup_type')
    portGroup.set_cost(cost1)
    vSwitch.add_portGroups(portGroup)
    vmhost.add_virtualSwitches(vSwitch)
    vmhost.add_portGroups(portGroup)
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    vmhost2 = VmHost()
    vmhost2.set_id('VH2')
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost2)
    # Shared storage mounted on both hosts
    storage = StorageVolume()
    storage.set_id('sv-01')
    storage.set_name('storage-01')
    storage.set_resourceManagerId('rmId')
    storage.set_size(1234)
    storage.set_free(2345)
    storage.set_vmfsVolume(True)
    storage.set_shared(True)
    storage.set_assignedServerCount(1)
    storage.set_volumeType('VMFS')
    storage.set_volumeId('101')
    hostMount1 = HostMountPoint()
    hostMount1.set_path('test_path1')
    hostMount1.set_vmHostId('VH1')
    storage.add_mountPoints(hostMount1)
    hostMount2 = HostMountPoint()
    hostMount2.set_path('test_path2')
    hostMount2.set_vmHostId('VH2')
    storage.add_mountPoints(hostMount2)
    healthnmon_db_api.storage_volume_save(get_admin_context(), storage)
    vm = Vm()
    vm.set_id('vm-01')
    vm.set_name('vm-01')
    vm.set_vmHostId('VH1')
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vmhosts = healthnmon_db_api.vm_host_get_by_ids(
        get_admin_context(), [vmhost_id])
    self.assertFalse(vmhosts is None,
                     'host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'host get by id returned invalid number of list')
    healthnmon_db_api.vm_host_delete_by_ids(
        get_admin_context(), [vmhost_id])
    vmhosts = healthnmon_db_api.vm_host_get_by_ids(
        get_admin_context(), [vmhost_id])
    self.assertTrue(vmhosts is None or len(vmhosts) == 0,
                    'host not deleted')
def test_vm_host_delete(self):
    """Delete a saved host (with switch, portgroup, shared storage and
    a resident VM) and verify it is no longer returned by id."""
    vmhost_id = 'VH1'
    vmhost = VmHost()
    vmhost.id = vmhost_id
    switch = VirtualSwitch()
    switch.set_id('vSwitch-01')
    switch.set_name('vSwitch-01')
    switch.set_resourceManagerId('rmId')
    switch.set_switchType('vSwitch')
    cost = Cost()
    cost.set_value(100)
    cost.set_units('USD')
    switch.set_cost(cost)
    port_group = PortGroup()
    port_group.set_id('pg-01')
    port_group.set_name('pg-01')
    port_group.set_resourceManagerId('rmId')
    port_group.set_type('portgroup_type')
    port_group.set_cost(cost)
    switch.add_portGroups(port_group)
    vmhost.add_virtualSwitches(switch)
    vmhost.add_portGroups(port_group)
    healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
    second_host = VmHost()
    second_host.set_id('VH2')
    healthnmon_db_api.vm_host_save(get_admin_context(), second_host)
    storage = StorageVolume()
    storage.set_id('sv-01')
    storage.set_name('storage-01')
    storage.set_resourceManagerId('rmId')
    storage.set_size(1234)
    storage.set_free(2345)
    storage.set_vmfsVolume(True)
    storage.set_shared(True)
    storage.set_assignedServerCount(1)
    storage.set_volumeType('VMFS')
    storage.set_volumeId('101')
    mount_one = HostMountPoint()
    mount_one.set_path('test_path1')
    mount_one.set_vmHostId('VH1')
    storage.add_mountPoints(mount_one)
    mount_two = HostMountPoint()
    mount_two.set_path('test_path2')
    mount_two.set_vmHostId('VH2')
    storage.add_mountPoints(mount_two)
    healthnmon_db_api.storage_volume_save(get_admin_context(), storage)
    vm = Vm()
    vm.set_id('vm-01')
    vm.set_name('vm-01')
    vm.set_vmHostId('VH1')
    healthnmon_db_api.vm_save(get_admin_context(), vm)
    vmhosts = healthnmon_db_api.vm_host_get_by_ids(
        get_admin_context(), [vmhost_id])
    self.assertFalse(vmhosts is None,
                     'host get by id returned a none list')
    self.assertTrue(len(vmhosts) > 0,
                    'host get by id returned invalid number of list')
    healthnmon_db_api.vm_host_delete_by_ids(
        get_admin_context(), [vmhost_id])
    vmhosts = healthnmon_db_api.vm_host_get_by_ids(
        get_admin_context(), [vmhost_id])
    self.assertTrue(vmhosts is None or len(vmhosts) == 0,
                    'host not deleted')