Example #1
 def test_vm_host_get_all_by_filters_changessince(self):
     # Create VmHosts
     host_ids = ('VH1', 'VH2', 'VH3')
     host_names = ('name1', 'name2', 'name3')
     for i in range(len(host_ids)):
         self.__create_vm_host(id=host_ids[i], name=host_names[i])
     created_time = long(time.time() * 1000L)
     # Wait for 1 sec and update second host and delete third host
     time.sleep(1)
     second_host = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [host_ids[1]])[0]
     second_host.name = 'New name'
     healthnmon_db_api.vm_host_save(self.admin_context, second_host)
     healthnmon_db_api.vm_host_delete_by_ids(
         self.admin_context, [host_ids[2]])
     # Query with filter
     expected_updated_ids = [host_ids[1], host_ids[2]]
     filters = {'changes-since': created_time}
     vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
         self.admin_context, filters,
         None, None)
     self.assert_(vmhosts is not None)
     self.assert_(len(vmhosts) == 2)
     for host in vmhosts:
         self.assert_(host is not None)
         self.assert_(host.id in expected_updated_ids)
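
The filter in this test selects only hosts whose data changed after the given epoch-millisecond timestamp, so the unmodified first host is excluded while the updated and the deleted hosts are returned. As a rough illustration only (the real healthnmon DB API applies this filter in SQL), the predicate can be pictured over the epoch columns exercised in the timestamp tests further below:

    def changed_since(host, changes_since_ms):
        # Illustrative sketch, not the actual implementation: a host counts
        # as changed if any of its epoch columns is newer than the cutoff.
        epochs = (host.get_createEpoch(),
                  host.get_lastModifiedEpoch(),
                  host.get_deletedEpoch())
        return any(e is not None and e > changes_since_ms for e in epochs)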
Example #2
 def test_vm_host_get_all_by_filters_changessince(self):
     # Create VmHosts
     host_ids = ('VH1', 'VH2', 'VH3')
     host_names = ('name1', 'name2', 'name3')
     for i in range(len(host_ids)):
         self.__create_vm_host(id=host_ids[i], name=host_names[i])
     created_time = long(time.time() * 1000L)
     # Wait for 1 sec and update second host and delete third host
     time.sleep(1)
     second_host = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [host_ids[1]])[0]
     second_host.name = 'New name'
     healthnmon_db_api.vm_host_save(self.admin_context, second_host)
     healthnmon_db_api.vm_host_delete_by_ids(self.admin_context,
                                             [host_ids[2]])
     # Query with filter
     expected_updated_ids = [host_ids[1], host_ids[2]]
     filters = {'changes-since': created_time}
     vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
         self.admin_context, filters, None, None)
     self.assert_(vmhosts is not None)
     self.assert_(len(vmhosts) == 2)
     for host in vmhosts:
         self.assert_(host is not None)
         self.assert_(host.id in expected_updated_ids)
Example #3
    def test_host_removed_event(self):
        self.__mock_service_get_all_by_topic()
        deleted_host = VmHost()
        deleted_host.set_id('compute1')
        deleted_host.set_name('compute1')
        self.mox.StubOutWithMock(api, 'vm_host_get_all')
        api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
        self.mox.StubOutWithMock(api, 'vm_get_all')
        api.vm_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'storage_volume_get_all')
        api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'subnet_get_all')
        api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
        nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')

        api.vm_host_delete_by_ids(
            mox.IgnoreArg(),
            mox.IgnoreArg()).MultipleTimes().AndReturn(None)
        self.mox.StubOutWithMock(
            InventoryCacheManager, 'get_compute_conn_driver')

        InventoryCacheManager.get_compute_conn_driver(
            'compute1',
            Constants.VmHost).AndReturn(fake.get_connection())
        self.mox.ReplayAll()
        compute_service = dict(host='host1')
        compute = dict(id='compute1', hypervisor_type='fake',
                       service=compute_service)
        rm_context = \
            rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'],
                                       rmIpAddress=compute_service['host'],
                                       rmUserName='******',
                                       rmPassword='******')

        InventoryCacheManager.get_all_compute_inventory().clear()

        InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
            ComputeInventory(rm_context)
        InventoryCacheManager.get_compute_inventory(
            'compute1').update_compute_info(rm_context, deleted_host)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 1)
        inv_manager = InventoryManager()
        inv_manager._refresh_from_db(None)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEquals(msg['priority'], notifier_api.INFO)
        event_type = \
            event_metadata.get_EventMetaData(
                event_metadata.EVENT_TYPE_HOST_REMOVED)
        self.assertEquals(msg['event_type'],
                          event_type.get_event_fully_qal_name())
        payload = msg['payload']
        self.assertEquals(payload['entity_type'], 'VmHost')
        self.assertEquals(payload['entity_id'], deleted_host.id)
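
Read together, the final assertions of this test pin down the shape of the single emitted notification. Using the same names and imports as the test above, the expected message can be summarised as follows (shown only for clarity; the real message may carry extra keys added by the notifier):

    expected_msg = {
        'priority': notifier_api.INFO,
        'event_type': event_metadata.get_EventMetaData(
            event_metadata.EVENT_TYPE_HOST_REMOVED).get_event_fully_qal_name(),
        'payload': {
            'entity_type': 'VmHost',
            'entity_id': deleted_host.id,   # 'compute1' in this test
        },
    }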
Example #4
    def test_host_removed_event(self):
        self.__mock_service_get_all_by_topic()
        deleted_host = VmHost()
        deleted_host.set_id('compute1')
        deleted_host.set_name('compute1')
        self.mox.StubOutWithMock(api, 'vm_host_get_all')
        api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
        self.mox.StubOutWithMock(api, 'vm_get_all')
        api.vm_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'storage_volume_get_all')
        api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'subnet_get_all')
        api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
        nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')

        api.vm_host_delete_by_ids(
            mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)
        self.mox.StubOutWithMock(InventoryCacheManager,
                                 'get_compute_conn_driver')

        InventoryCacheManager.get_compute_conn_driver(
            'compute1', Constants.VmHost).AndReturn(fake.get_connection())
        self.mox.ReplayAll()
        compute_service = dict(host='host1')
        compute = dict(id='compute1',
                       hypervisor_type='fake',
                       service=compute_service)
        rm_context = \
            rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'],
                                       rmIpAddress=compute_service['host'],
                                       rmUserName='******',
                                       rmPassword='******')

        InventoryCacheManager.get_all_compute_inventory().clear()

        InventoryCacheManager.get_all_compute_inventory(
        )['compute1'] = ComputeInventory(rm_context)
        InventoryCacheManager.get_compute_inventory(
            'compute1').update_compute_info(rm_context, deleted_host)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 1)
        inv_manager = InventoryManager()
        inv_manager._refresh_from_db(None)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEquals(msg['priority'], notifier_api.INFO)
        event_type = \
            event_metadata.get_EventMetaData(
                event_metadata.EVENT_TYPE_HOST_REMOVED)
        self.assertEquals(msg['event_type'],
                          event_type.get_event_fully_qal_name())
        payload = msg['payload']
        self.assertEquals(payload['entity_type'], 'VmHost')
        self.assertEquals(payload['entity_id'], deleted_host.id)
Example #5
 def test_deleted_vm_host(self):
     """ Test if vmhost get all by filters lists deleted virtual machines
         if the vmhost was deleted.
     """
     vmhost, vm_01, vm_02 = self._setup_host()
     self.__save(vmhost, vm_01, vm_02)
     healthnmon_db_api.vm_host_delete_by_ids(self.admin_context, vmhost.id)
     self.assertEqual(
         healthnmon_db_api.vm_get_all_by_filters(self.admin_context,
                                                 {'id': vm_01.id}, None,
                                                 None)[0].deleted, True,
         'Delete vm host and assert if VM is deleted')
Example #6
    def test_host_removed_event_none_host(self):
        deleted_host = VmHost()
        deleted_host.set_id('compute1')
        deleted_host.set_name('compute1')
        self.mox.StubOutWithMock(api, 'vm_host_get_all')
        api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
        self.mox.StubOutWithMock(api, 'vm_get_all')
        api.vm_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'storage_volume_get_all')
        api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'subnet_get_all')
        api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
        nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')

        api.vm_host_delete_by_ids(
            mox.IgnoreArg(),
            mox.IgnoreArg()).MultipleTimes().AndReturn(None)

        self.mox.StubOutWithMock(
            InventoryCacheManager, 'get_compute_conn_driver')

        InventoryCacheManager.get_compute_conn_driver(
            'compute1',
            Constants.VmHost).AndReturn(fake.get_connection())
        self.mox.ReplayAll()

        compute_service = dict(host='host1')
        compute = dict(id='compute1', hypervisor_type='fake',
                       service=compute_service)
        rm_context = \
            rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'],
                                       rmIpAddress=compute_service['host'],
                                       rmUserName='******',
                                       rmPassword='******')

        InventoryCacheManager.get_all_compute_inventory().clear()

        InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
            ComputeInventory(rm_context)
        InventoryCacheManager.get_compute_inventory(
            'compute1').update_compute_info(rm_context, deleted_host)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 1)
        InventoryCacheManager.get_inventory_cache(
        )[Constants.VmHost][deleted_host.get_id()] = None

        inv_manager = InventoryManager()
        inv_manager._refresh_from_db(None)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
Example #7
 def test_inconsistent_vmhost(self):
     """ Test if vmhost get all by filters lists deleted virtual machines
         if an inconsistent vmhost was deleted.
     """
     vmhost, vm_01, vm_02 = self._setup_host()
     vmhost.virtualMachineIds = ['a', 'b']
     self.__save(vmhost, vm_01, vm_02)
     healthnmon_db_api.vm_host_delete_by_ids(self.admin_context, vmhost.id)
     self.assertEqual(
         healthnmon_db_api.vm_get_all_by_filters(self.admin_context,
                                                 {'deleted': 'true'}, None,
                                                 None)[0].deleted, True,
         'Delete inconsistent vm host and assert if VM is deleted')
Example #8
 def test_deleted_vm_host(self):
     """ Test if vmhost get all by filters lists deleted virtual machines
         if the vmhost was deleted.
     """
     vmhost, vm_01, vm_02 = self._setup_host()
     self.__save(vmhost, vm_01, vm_02)
     healthnmon_db_api.vm_host_delete_by_ids(self.admin_context, vmhost.id)
     self.assertEqual(
         healthnmon_db_api.vm_get_all_by_filters(self.admin_context,
                                                 {'id': vm_01.id},
                                                 None,
                                                 None)[0].deleted,
         True, 'Delete vm host and assert if VM is deleted')
Example #9
    def test_host_removed_event_none_host(self):
        deleted_host = VmHost()
        deleted_host.set_id('compute1')
        deleted_host.set_name('compute1')
        self.mox.StubOutWithMock(api, 'vm_host_get_all')
        api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
        self.mox.StubOutWithMock(api, 'vm_get_all')
        api.vm_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'storage_volume_get_all')
        api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'subnet_get_all')
        api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
        nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
        self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')

        api.vm_host_delete_by_ids(
            mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)

        self.mox.StubOutWithMock(InventoryCacheManager,
                                 'get_compute_conn_driver')

        InventoryCacheManager.get_compute_conn_driver(
            'compute1', Constants.VmHost).AndReturn(fake.get_connection())
        self.mox.ReplayAll()

        compute_service = dict(host='host1')
        compute = dict(id='compute1',
                       hypervisor_type='fake',
                       service=compute_service)
        rm_context = \
            rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'],
                                       rmIpAddress=compute_service['host'],
                                       rmUserName='******',
                                       rmPassword='******')

        InventoryCacheManager.get_all_compute_inventory().clear()

        InventoryCacheManager.get_all_compute_inventory(
        )['compute1'] = ComputeInventory(rm_context)
        InventoryCacheManager.get_compute_inventory(
            'compute1').update_compute_info(rm_context, deleted_host)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 1)
        InventoryCacheManager.get_inventory_cache()[Constants.VmHost][
            deleted_host.get_id()] = None

        inv_manager = InventoryManager()
        inv_manager._refresh_from_db(None)
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
Example #10
 def test_inconsistent_vmhost(self):
     """ Test if vmhost get all by filters lists deleted virtual machines
         if an inconsistent vmhost was deleted.
     """
     vmhost, vm_01, vm_02 = self._setup_host()
     vmhost.virtualMachineIds = ['a', 'b']
     self.__save(vmhost, vm_01, vm_02)
     healthnmon_db_api.vm_host_delete_by_ids(self.admin_context, vmhost.id)
     self.assertEqual(
         healthnmon_db_api.vm_get_all_by_filters(self.admin_context,
                                                 {'deleted': 'true'},
                                                 None,
                                                 None)[0].deleted,
         True, 'Delete inconsistent vm host and assert if VM is deleted')
Example #11
    def test_refresh_from_db_delete_host(self):
        self._createInvCache()
        InventoryCacheManager.get_all_compute_inventory().clear()
        compute = []
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn(compute)

        im = self.inv_manager
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)

        compute = _create_Compute(compute_id='vmhost1')
        service = compute['service']
        rm_context = \
            rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'],
                                       rmIpAddress=service['host'],
                                       rmUserName='******',
                                       rmPassword='******')
        InventoryCacheManager.get_all_compute_inventory()['vmhost1'] = \
            ComputeInventory(rm_context)

        vmhost = VmHost()
        vmhost.set_id('vmhost1')
        vmhost.set_name('vmhost1')
        InventoryCacheManager.get_all_compute_inventory(
        )['vmhost1'].update_compute_info(rm_context, vmhost)

        self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')

        api.vm_host_delete_by_ids(
            mox.IgnoreArg(),
            mox.IgnoreArg()).MultipleTimes().AndReturn(None)

        self.mox.StubOutWithMock(event_api, 'notify_host_update')
        event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())

        self.mox.ReplayAll()

        im._refresh_from_db(None)
        self.mox.VerifyAll()
        self.mox.stubs.UnsetAll()
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)
        self.assertTrue(InventoryCacheManager.get_all_compute_inventory(
        ).get('compute1') is None)

        self.mox.UnsetStubs()
Example #12
 def test_vm_host_get_all_by_filters_not_deleted(self):
     # Create VmHosts
     host_ids = ('VH1', 'VH2')
     host_names = ('name1', 'name2')
     for i in range(len(host_ids)):
         self.__create_vm_host(id=host_ids[i], name=host_names[i])
     # Delete one host
     healthnmon_db_api.vm_host_delete_by_ids(self.admin_context,
                                             [host_ids[0]])
     # Query with filter
     filters = {'deleted': False}
     vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
         self.admin_context, filters, 'id', DbConstants.ORDER_ASC)
     self.assert_(vmhosts is not None)
     self.assert_(len(vmhosts) == 1)
     self.assert_(vmhosts[0] is not None)
     self.assert_(vmhosts[0].id == host_ids[1])
Example #13
    def test_refresh_from_db_delete_host(self):
        self._createInvCache()
        InventoryCacheManager.get_all_compute_inventory().clear()
        compute = []
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn(compute)

        im = self.inv_manager
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)

        compute = _create_Compute(compute_id='vmhost1')
        service = compute['service']
        rm_context = \
            rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'],
                                       rmIpAddress=service['host'],
                                       rmUserName='******',
                                       rmPassword='******')
        InventoryCacheManager.get_all_compute_inventory()['vmhost1'] = \
            ComputeInventory(rm_context)

        vmhost = VmHost()
        vmhost.set_id('vmhost1')
        vmhost.set_name('vmhost1')
        InventoryCacheManager.get_all_compute_inventory(
        )['vmhost1'].update_compute_info(rm_context, vmhost)

        self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')

        api.vm_host_delete_by_ids(
            mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)

        self.mox.StubOutWithMock(event_api, 'notify_host_update')
        event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())

        self.mox.ReplayAll()

        im._refresh_from_db(None)
        self.mox.VerifyAll()
        self.mox.stubs.UnsetAll()
        self.assertEquals(
            len(InventoryCacheManager.get_all_compute_inventory()), 0)
        self.assertTrue(InventoryCacheManager.get_all_compute_inventory().get(
            'compute1') is None)

        self.mox.UnsetStubs()
Example #14
 def test_vm_host_get_all_by_filters_not_deleted(self):
     # Create VmHosts
     host_ids = ('VH1', 'VH2')
     host_names = ('name1', 'name2')
     for i in range(len(host_ids)):
         self.__create_vm_host(id=host_ids[i], name=host_names[i])
     # Delete one host
     healthnmon_db_api.vm_host_delete_by_ids(
         self.admin_context, [host_ids[0]])
     # Query with filter
     filters = {'deleted': False}
     vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
         self.admin_context, filters,
         'id', DbConstants.ORDER_ASC)
     self.assert_(vmhosts is not None)
     self.assert_(len(vmhosts) == 1)
     self.assert_(vmhosts[0] is not None)
     self.assert_(vmhosts[0].id == host_ids[1])
Example #15
 def test_vm_host_delete_none(self):
     '''
     Check the vm_host_delete_by_ids by passing None as id
     '''
     vmhost = VmHost()
     vmhost.id = 'VH1-id'
     healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
     vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
     self.assertFalse(vmhosts is None, 'vm_host_get_all returned a None')
     self.assertTrue(len(vmhosts) == 1,
                     'vm_host_get_all does not returned expected number of hosts')
     #Now call the delete api by passing the id as None
     healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(), None)
     #Again try to retrieve the vmhost and check whether its intact
     vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
     self.assertFalse(vmhosts is None,
                      'vm_host_get_all returned a None')
     self.assertTrue(len(vmhosts) == 1,
                     'vm_host_get_all does not returned expected number of hosts')
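
This test only passes if the delete API treats a None id argument as a no-op. Purely as an illustration of the behaviour being asserted (the actual healthnmon implementation is not shown here), the guard would amount to:

    def vm_host_delete_by_ids(context, host_ids):
        # Hypothetical sketch: with no ids supplied there is nothing to
        # delete, so return without touching the stored hosts.
        if not host_ids:
            return
        # ... soft-delete the hosts matching host_ids ...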
Example #16
    def _clean_deleted_computes(self, db_keys):
        keys = InventoryCacheManager.get_all_compute_inventory(
        ).keys()  # since we're deleting
        deletion_list = []
        for compute_id in keys:
            if compute_id not in db_keys:
                vmHostObj = InventoryCacheManager.get_all_compute_inventory()[
                    compute_id].get_compute_info()
                if vmHostObj is not None:
                    deletion_list.append(vmHostObj.get_id())

        host_deleted_list = []
        if len(deletion_list) != 0:
            # Delete object from cache
            for _id in deletion_list:
                host_deleted = InventoryCacheManager.get_object_from_cache(
                    _id, Constants.VmHost)
                if host_deleted is not None:
                    host_deleted_list.append(
                        InventoryCacheManager.
                        get_object_from_cache(_id, Constants.VmHost))
                else:
                    LOG.warn(_(
                        "VmHost object for id %s not found in cache") % _id)

            # Delete the VmHost from DB
            api.vm_host_delete_by_ids(get_admin_context(), deletion_list)
            # Generate the VmHost Removed Event
            for host_deleted in host_deleted_list:
                LOG.debug(_('Generating Host Removed event for the \
                host id : %s') % str(
                    host_deleted.get_id()))
                event_api.notify_host_update(
                    event_metadata.EVENT_TYPE_HOST_REMOVED, host_deleted)
                # VmHost is deleted from compute inventory and inventory cache
                # after notifying the event
                del InventoryCacheManager.get_all_compute_inventory()[
                    host_deleted.get_id()]
                InventoryCacheManager.delete_object_in_cache(
                    host_deleted.get_id(), Constants.VmHost)
                LOG.audit(_('Host with (UUID, host name) - (%s, %s) \
                got removed') % (
                    host_deleted.get_id(), host_deleted.get_name()))
Example #17
 def test_vm_host_delete_none(self):
     '''
     Check the vm_host_delete_by_ids by passing None as id
     '''
     vmhost = VmHost()
     vmhost.id = 'VH1-id'
     healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
     vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
     self.assertFalse(vmhosts is None, 'vm_host_get_all returned a None')
     self.assertTrue(
         len(vmhosts) == 1,
         'vm_host_get_all does not returned expected number of hosts')
     #Now call the delete api by passing the id as None
     healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(), None)
     #Again try to retrieve the vmhost and check whether its intact
     vmhosts = healthnmon_db_api.vm_host_get_all(get_admin_context())
     self.assertFalse(vmhosts is None, 'vm_host_get_all returned a None')
     self.assertTrue(
         len(vmhosts) == 1,
         'vm_host_get_all does not returned expected number of hosts')
Example #18
    def test_refresh_from_db_delete_host(self):
        self._createInvCache()
        InventoryCacheManager.get_all_compute_inventory().clear()
        compute = []
        self.mox.StubOutWithMock(db, "compute_node_get_all")
        db.compute_node_get_all(mox.IgnoreArg()).AndReturn(compute)

        im = self.inv_manager
        self.assertEquals(len(InventoryCacheManager.get_all_compute_inventory()), 0)

        compute = _create_Compute(compute_id="vmhost1")
        service = compute["service"]
        rm_context = rmcontext.ComputeRMContext(
            rmType=compute["hypervisor_type"],
            rmIpAddress=service["host"],
            rmUserName="******",
            rmPassword="******",
        )
        InventoryCacheManager.get_all_compute_inventory()["vmhost1"] = ComputeInventory(rm_context)

        vmhost = VmHost()
        vmhost.set_id("vmhost1")
        vmhost.set_name("vmhost1")
        InventoryCacheManager.get_all_compute_inventory()["vmhost1"].update_compute_info(rm_context, vmhost)

        self.mox.StubOutWithMock(api, "vm_host_delete_by_ids")

        api.vm_host_delete_by_ids(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(None)

        self.mox.StubOutWithMock(event_api, "notify_host_update")
        event_api.notify_host_update(mox.IgnoreArg(), mox.IgnoreArg())

        self.mox.ReplayAll()

        im._refresh_from_db(None)
        self.mox.VerifyAll()
        self.mox.stubs.UnsetAll()
        self.assertEquals(len(InventoryCacheManager.get_all_compute_inventory()), 0)
        self.assertTrue(InventoryCacheManager.get_all_compute_inventory().get("compute1") is None)

        self.mox.UnsetStubs()
Example #19
    def _refresh_from_db(self, context):
        """Make our compute_node inventory map match the db."""

        # Add/update existing compute_nodes ...

        computes = db.compute_node_get_all(context)
        existing = InventoryCacheManager.get_all_compute_inventory().keys()
        db_keys = []
        for compute in computes:
            compute_id = str(compute["id"])
            service = compute["service"]
            compute_hypervisor_type = compute["hypervisor_type"]
            LOG.debug(_(" Compute-id -> %s: Hypervisor Type -> %s") % (compute_id, compute_hypervisor_type))

            if service is not None and compute_hypervisor_type == "QEMU":
                compute_alive = hnm_utils.is_service_alive(service["updated_at"], service["created_at"])
                db_keys.append(compute_id)
                if not compute_alive:
                    LOG.warn(_("Service %s for host %s is not active") % (service["binary"], service["host"]))
                    continue
                if compute_id not in existing:
                    rm_context = rmcontext.ComputeRMContext(
                        rmType=compute["hypervisor_type"],
                        rmIpAddress=service["host"],
                        rmUserName="******",
                        rmPassword="******",
                    )
                    InventoryCacheManager.get_all_compute_inventory()[compute_id] = ComputeInventory(rm_context)
                    LOG.audit(_("New Host with compute_id  %s is obtained") % (compute_id))
                InventoryCacheManager.get_all_compute_inventory()[compute_id].update_compute_Id(compute_id)
            else:
                LOG.warn(_(" No services entry found for compute id  %s") % compute_id)

        # Cleanup compute_nodes removed from db ...

        keys = InventoryCacheManager.get_all_compute_inventory().keys()  # since we're deleting
        deletion_list = []
        for compute_id in keys:
            if compute_id not in db_keys:
                vmHostObj = InventoryCacheManager.get_all_compute_inventory()[compute_id].get_compute_info()
                if vmHostObj is not None:
                    deletion_list.append(vmHostObj.get_id())

        host_deleted_list = []
        if len(deletion_list) != 0:
            # Delete object from cache
            for _id in deletion_list:
                host_deleted = InventoryCacheManager.get_object_from_cache(_id, Constants.VmHost)
                if host_deleted is not None:
                    host_deleted_list.append(InventoryCacheManager.get_object_from_cache(_id, Constants.VmHost))
                else:
                    LOG.warn(_("VmHost object for id %s not found in cache") % _id)

            # Delete the VmHost from DB
            api.vm_host_delete_by_ids(get_admin_context(), deletion_list)
            # Generate the VmHost Removed Event
            for host_deleted in host_deleted_list:
                LOG.debug(
                    _(
                        "Generating Host Removed event \
                for the host id : %s"
                    )
                    % str(host_deleted.get_id())
                )
                event_api.notify_host_update(event_metadata.EVENT_TYPE_HOST_REMOVED, host_deleted)
                # VmHost is deleted from compute inventory and inventory
                # cache after notifying the event
                del InventoryCacheManager.get_all_compute_inventory()[host_deleted.get_id()]
                InventoryCacheManager.delete_object_in_cache(host_deleted.get_id(), Constants.VmHost)
                LOG.audit(
                    _(
                        "Host with (UUID, host name) \
                - (%s, %s) got removed"
                    )
                    % (host_deleted.get_id(), host_deleted.get_name())
                )
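
Stripped of the caching and notification plumbing, the cleanup half of _refresh_from_db is a set difference between the cached compute ids and the ids still reported by the database, followed by a delete and one HOST_REMOVED event per vanished host. A condensed sketch of that pattern (the function and its parameters are hypothetical stand-ins for the InventoryCacheManager, api and event_api calls above):

    def reconcile_removed_hosts(cache, db_rows, delete_fn, notify_fn):
        # cache:   dict mapping compute_id -> inventory entry
        # db_rows: iterable of dicts carrying at least an 'id' key
        db_keys = set(str(row['id']) for row in db_rows)
        removed = [cid for cid in list(cache) if cid not in db_keys]
        if removed:
            delete_fn(removed)              # e.g. soft-delete in the DB
            for cid in removed:
                notify_fn(cid)              # e.g. raise a HOST_REMOVED event
                del cache[cid]              # drop from the in-memory inventory
        return removed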
Example #20
    def test_vmhost_save_modify_delete_with_vSwitch_pGroup(self):
        """Test case for filter deleted virtual switch and port group
        1. Create host with 2 virtual switch and port groups.
        2. Assert for the above point.
        3. Save VmHost by removing one virtual switch and one port group.
        4. Assert for deleted virtual switch and port group.
        5. Delete the host.
        6. Use filter_by api to assert for deleted host,
        virtual switch and port group.
        """
        "Test for  points 1 and 2"
        host_id = 'VH1'
        vmhost = VmHost()
        vmhost.id = host_id
        cost = self._create_cost()
        vSwitch1 = self._create_switch(host_id + '_vSwitch-01')
        vSwitch1.set_cost(cost)
        portGroup1 = self._create_port_group(host_id + '_pGroup-01')
        portGroup1.set_cost(cost)
        vSwitch1.add_portGroups(portGroup1)
        vmhost.add_virtualSwitches(vSwitch1)
        vmhost.add_portGroups(portGroup1)

        "Add the second vswitch and portgroup"
        vSwitch2 = self._create_switch(host_id + '_vSwitch-02')
        vSwitch2.set_cost(cost)
        portGroup2 = self._create_port_group(host_id + '_pGroup-02')
        portGroup2.set_cost(cost)
        vSwitch2.add_portGroups(portGroup2)
        vmhost.add_virtualSwitches(vSwitch2)
        vmhost.add_portGroups(portGroup2)

        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
        vmhosts = healthnmon_db_api.vm_host_get_by_ids(
            get_admin_context(), [host_id])
        self.assertFalse(
            vmhosts is None, 'Host get by id returned a none list')
        self.assertTrue(
            len(vmhosts[0].get_virtualSwitches()) > 0,
            'Host get by virtual switch returned invalid number of list')
        vss = vmhosts[0].get_virtualSwitches()
        vs_ids = []
        for vs in vss:
            vs_ids.append(vs.get_id())
        self.assertTrue(
            vSwitch1.get_id() in vs_ids,
            "Added virtual switch1 does not appears in the host api")
        self.assertTrue(
            vSwitch2.get_id() in vs_ids,
            "Added virtual switch2 does not appears in the host api")
        pgs = vmhosts[0].get_portGroups()
        pg_ids = []
        for pg in pgs:
            pg_ids.append(pg.get_id())
        self.assertTrue(portGroup1.get_id(
        ) in pg_ids, "Added port group1 does not appears in the host api")
        self.assertTrue(portGroup2.get_id(
        ) in pg_ids, "Added port group2 does not appears in the host api")

        # Points 3 and 4 - Remove the second vswitch and
        # portgroup from the vmhost and the save the vmhost"
        vmhost = VmHost()
        vmhost.id = host_id
        cost = self._create_cost()
        vSwitch = self._create_switch(host_id + '_vSwitch-01')
        vSwitch.set_cost(cost)
        portGroup = self._create_port_group(host_id + '_pGroup-01')
        portGroup.set_cost(cost)
        vSwitch.add_portGroups(portGroup)
        vmhost.add_virtualSwitches(vSwitch)
        vmhost.add_portGroups(portGroup)
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

        vmhosts = healthnmon_db_api.vm_host_get_by_ids(
            get_admin_context(), [host_id])
        self.assertFalse(
            vmhosts is None, 'Host get by id returned a none list')
        vss = vmhosts[0].get_virtualSwitches()
        vs_ids = []
        for vs in vss:
            vs_ids.append(vs.get_id())
        self.assertTrue(
            vSwitch.get_id() in vs_ids,
            "Modified virtual switch1 not appearing in the host api")
        self.assertTrue(vSwitch2.get_id() not in vs_ids,
                        "Deleted virtual switch2 appears in the host api")
        pgs = vmhosts[0].get_portGroups()
        pg_ids = []
        for pg in pgs:
            pg_ids.append(pg.get_id())
        self.assertTrue(portGroup.get_id(
        ) in pg_ids, "Modified port group1 not appearing in the host api")
        self.assertTrue(portGroup2.get_id(
        ) not in pg_ids, "Deleted port group2 appears in the host api")

        # Points 5 and 6 - Delete the host and
        # assert for deletion using filter-by api"
        filters = {'id': host_id, 'deleted': 'true'}
        healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(), [host_id])
        del_vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
            get_admin_context(), filters, 'id', 'asc')
        self.assertFalse(
            del_vmhosts is None, 'Host get by filters returned a none list')
        vss = del_vmhosts[0].get_virtualSwitches()
        vs_ids = []
        for vs in vss:
            vs_ids.append(vs.get_id())
        self.assertTrue(
            vSwitch1.get_id() in vs_ids,
            "Deleted virtual switch1 not appearing in the host filter api")
        self.assertTrue(
            vSwitch2.get_id() in vs_ids,
            "Deleted virtual switch2 not appears in the host filter api")
        pgs = del_vmhosts[0].get_portGroups()
        pg_ids = []
        for pg in pgs:
            pg_ids.append(pg.get_id())
        self.assertTrue(
            portGroup1.get_id() in pg_ids,
            "Deleted port group1 not appearing in the host filter api")
        self.assertTrue(
            portGroup2.get_id() in pg_ids,
            "Deleted port group2 not appears in the host filter api")
Example #21
 def test_timestamp_columns(self):
     """
         Test the time stamp columns createEpoch,
         modifiedEpoch and deletedEpoch
     """
     vmhost = VmHost()
     vmhost.set_id('VH1')
     virSw1 = VirtualSwitch()
     virSw1.set_id('VS1_VH1')
     portGrp1 = PortGroup()
     portGrp1.set_id('PG1_VH1')
     virSw1.add_portGroups(portGrp1)
     vmhost.add_virtualSwitches(virSw1)
     vmhost.add_portGroups(portGrp1)
     # Check for createEpoch
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
     epoch_after = utils.get_current_epoch_ms()
     vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [vmhost.get_id()])[0]
     self.assert_(test_utils.is_timestamp_between(
         epoch_before, epoch_after, vmhost_queried.get_createEpoch()))
     for virSw in vmhost_queried.get_virtualSwitches():
         self.assert_(test_utils.is_timestamp_between(
             epoch_before, epoch_after, virSw.get_createEpoch()))
         for pg in virSw.get_portGroups():
             self.assert_(test_utils.is_timestamp_between(
                 epoch_before, epoch_after, pg.get_createEpoch()))
     # Check for lastModifiedEpoch after modifying host
     vmhost_modified = vmhost_queried
     test_utils.unset_timestamp_fields(vmhost_modified)
     vmhost_modified.set_name('changed_name')
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
     epoch_after = utils.get_current_epoch_ms()
     vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [vmhost.get_id()])[0]
     self.assert_(vmhost_modified.get_createEpoch(
     ) == vmhost_queried.get_createEpoch())
     self.assert_(test_utils.is_timestamp_between(
         epoch_before, epoch_after, vmhost_queried.get_lastModifiedEpoch()))
     for virSw in vmhost_queried.get_virtualSwitches():
         self.assert_(test_utils.is_timestamp_between(
             epoch_before, epoch_after, virSw.get_lastModifiedEpoch()))
         for pg in virSw.get_portGroups():
             self.assert_(test_utils.is_timestamp_between(
                 epoch_before, epoch_after, pg.get_lastModifiedEpoch()))
     # Check for createdEpoch after adding switch and portgroup to host
     vmhost_modified = vmhost_queried
     test_utils.unset_timestamp_fields(vmhost_modified)
     virSw2 = VirtualSwitch()
     virSw2.set_id('VS2_VH1')
     portGrp2 = PortGroup()
     portGrp2.set_id('PG2_VH1')
     virSw2.add_portGroups(portGrp2)
     vmhost_modified.add_virtualSwitches(virSw2)
     vmhost_modified.add_portGroups(portGrp2)
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
     epoch_after = utils.get_current_epoch_ms()
     vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [vmhost.get_id()])[0]
     self.assert_(vmhost_modified.get_createEpoch(
     ) == vmhost_queried.get_createEpoch())
     self.assert_(test_utils.is_timestamp_between(
         epoch_before, epoch_after, vmhost_queried.get_lastModifiedEpoch()))
     for virSw in vmhost_queried.get_virtualSwitches():
         if virSw.get_id() == virSw2.get_id():
             self.assert_(test_utils.is_timestamp_between(
                 epoch_before, epoch_after, virSw.get_createEpoch()))
         else:
             self.assert_(test_utils.is_timestamp_between(
                 epoch_before, epoch_after, virSw.get_lastModifiedEpoch()))
         for pg in virSw.get_portGroups():
             if pg.get_id() == portGrp2.get_id():
                 self.assert_(test_utils.is_timestamp_between(
                     epoch_before, epoch_after, pg.get_createEpoch()))
             else:
                 self.assert_(test_utils.is_timestamp_between(
                     epoch_before, epoch_after, pg.get_lastModifiedEpoch()))
     # Check for deletedEpoch
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_delete_by_ids(
         self.admin_context, [vmhost_queried.get_id()])
     epoch_after = utils.get_current_epoch_ms()
     deleted_host = healthnmon_db_api.vm_host_get_all_by_filters(
         self.admin_context,
         {"id": vmhost_queried.get_id()}, None, None)[0]
     self.assertTrue(deleted_host.get_deleted())
     self.assert_(test_utils.is_timestamp_between(
         epoch_before, epoch_after, deleted_host.get_deletedEpoch()))
     deleted_switches = healthnmon_db_api.\
         virtual_switch_get_all_by_filters(self.admin_context,
                                           {"id": (virSw1.get_id(),
                                                   virSw2.get_id())},
                                           None, None)
     for deleted_switch in deleted_switches:
         self.assertTrue(deleted_switch.get_deleted())
         self.assert_(test_utils.is_timestamp_between(
             epoch_before, epoch_after, deleted_switch.get_deletedEpoch()))
         for deleted_portgrp in deleted_switch.get_portGroups():
             self.assertTrue(deleted_portgrp.get_deleted())
             self.assert_(test_utils.is_timestamp_between(
                 epoch_before, epoch_after,
                 deleted_portgrp.get_deletedEpoch()))
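
Every epoch assertion here is delegated to test_utils.is_timestamp_between, taking the timestamps measured immediately before and after the save or delete call as the bounds. Presumably the helper is nothing more than an inclusive range check over millisecond epochs, roughly:

    def is_timestamp_between(epoch_before, epoch_after, timestamp):
        # Assumed behaviour of the helper used above: the recorded epoch
        # must fall inside the window measured around the DB call.
        return timestamp is not None and epoch_before <= timestamp <= epoch_after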
Example #22
    def _refresh_from_db(self, context):
        """Make our compute_node inventory map match the db."""

        # Add/update existing compute_nodes ...

        computes = db.compute_node_get_all(context)
        existing = InventoryCacheManager.get_all_compute_inventory().keys()
        db_keys = []
        for compute in computes:
            compute_id = str(compute['id'])
            service = compute['service']
            compute_hypervisor_type = compute['hypervisor_type']
            LOG.debug(_(' Compute-id -> %s: Hypervisor Type -> %s') %
                      (compute_id, compute_hypervisor_type))

            if service is not None and compute_hypervisor_type == 'QEMU':
                compute_alive = hnm_utils.is_service_alive(
                    service['updated_at'], service['created_at'])
                db_keys.append(compute_id)
                if not compute_alive:
                    LOG.warn(_('Service %s for host %s is not active')
                             % (service['binary'], service['host']))
                    continue
                if compute_id not in existing:
                    rm_context = \
                        rmcontext.ComputeRMContext(
                            rmType=compute['hypervisor_type'],
                            rmIpAddress=service['host'],
                            rmUserName='******', rmPassword='******')
                    InventoryCacheManager.\
                        get_all_compute_inventory()[compute_id] =\
                        ComputeInventory(rm_context)
                    LOG.audit(_('New Host with compute_id  %s is obtained')
                              % (compute_id))
                InventoryCacheManager.get_all_compute_inventory(
                )[compute_id].update_compute_Id(compute_id)
            else:
                LOG.warn(_(' No services entry found for compute id  %s')
                         % compute_id)

        # Cleanup compute_nodes removed from db ...

        keys = InventoryCacheManager.get_all_compute_inventory(
        ).keys()  # since we're deleting
        deletion_list = []
        for compute_id in keys:
            if compute_id not in db_keys:
                vmHostObj = InventoryCacheManager.get_all_compute_inventory(
                )[compute_id].get_compute_info()
                if vmHostObj is not None:
                    deletion_list.append(vmHostObj.get_id())

        host_deleted_list = []
        if len(deletion_list) != 0:
            # Delete object from cache
            for _id in deletion_list:
                host_deleted = InventoryCacheManager.get_object_from_cache(
                    _id, Constants.VmHost)
                if host_deleted is not None:
                    host_deleted_list.append(
                        InventoryCacheManager.get_object_from_cache(
                            _id, Constants.VmHost))
                else:
                    LOG.warn(
                        _("VmHost object for id %s not found in cache") % _id)

            # Delete the VmHost from DB
            api.vm_host_delete_by_ids(get_admin_context(), deletion_list)
            # Generate the VmHost Removed Event
            for host_deleted in host_deleted_list:
                LOG.debug(_('Generating Host Removed event \
                for the host id : %s') % str(host_deleted.get_id()))
                event_api.notify_host_update(
                    event_metadata.EVENT_TYPE_HOST_REMOVED, host_deleted)
                # VmHost is deleted from compute inventory and inventory
                # cache after notifying the event
                del InventoryCacheManager.get_all_compute_inventory(
                )[host_deleted.get_id()]
                InventoryCacheManager.delete_object_in_cache(
                    host_deleted.get_id(), Constants.VmHost)
                LOG.audit(_('Host with (UUID, host name) \
                - (%s, %s) got removed') % (host_deleted.get_id(),
                                            host_deleted.get_name()))
Example #23
 def test_timestamp_columns(self):
     """
         Test the time stamp columns createEpoch, modifiedEpoch and deletedEpoch
     """
     vmhost = VmHost()
     vmhost.set_id('VH1')
     virSw1 = VirtualSwitch()
     virSw1.set_id('VS1_VH1')
     portGrp1 = PortGroup()
     portGrp1.set_id('PG1_VH1')
     virSw1.add_portGroups(portGrp1)
     vmhost.add_virtualSwitches(virSw1)
     vmhost.add_portGroups(portGrp1)
     # Check for createEpoch
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
     epoch_after = utils.get_current_epoch_ms()
     vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [vmhost.get_id()])[0]
     self.assert_(
         test_utils.is_timestamp_between(epoch_before, epoch_after,
                                         vmhost_queried.get_createEpoch()))
     for virSw in vmhost_queried.get_virtualSwitches():
         self.assert_(
             test_utils.is_timestamp_between(epoch_before, epoch_after,
                                             virSw.get_createEpoch()))
         for pg in virSw.get_portGroups():
             self.assert_(
                 test_utils.is_timestamp_between(epoch_before, epoch_after,
                                                 pg.get_createEpoch()))
     # Check for lastModifiedEpoch after modifying host
     vmhost_modified = vmhost_queried
     test_utils.unset_timestamp_fields(vmhost_modified)
     vmhost_modified.set_name('changed_name')
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
     epoch_after = utils.get_current_epoch_ms()
     vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [vmhost.get_id()])[0]
     self.assert_(vmhost_modified.get_createEpoch() ==
                  vmhost_queried.get_createEpoch())
     self.assert_(
         test_utils.is_timestamp_between(
             epoch_before, epoch_after,
             vmhost_queried.get_lastModifiedEpoch()))
     for virSw in vmhost_queried.get_virtualSwitches():
         self.assert_(
             test_utils.is_timestamp_between(epoch_before, epoch_after,
                                             virSw.get_lastModifiedEpoch()))
         for pg in virSw.get_portGroups():
             self.assert_(
                 test_utils.is_timestamp_between(
                     epoch_before, epoch_after, pg.get_lastModifiedEpoch()))
     # Check for createdEpoch after adding switch and portgroup to host
     vmhost_modified = vmhost_queried
     test_utils.unset_timestamp_fields(vmhost_modified)
     virSw2 = VirtualSwitch()
     virSw2.set_id('VS2_VH1')
     portGrp2 = PortGroup()
     portGrp2.set_id('PG2_VH1')
     virSw2.add_portGroups(portGrp2)
     vmhost_modified.add_virtualSwitches(virSw2)
     vmhost_modified.add_portGroups(portGrp2)
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
     epoch_after = utils.get_current_epoch_ms()
     vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
         self.admin_context, [vmhost.get_id()])[0]
     self.assert_(vmhost_modified.get_createEpoch() ==
                  vmhost_queried.get_createEpoch())
     self.assert_(
         test_utils.is_timestamp_between(
             epoch_before, epoch_after,
             vmhost_queried.get_lastModifiedEpoch()))
     for virSw in vmhost_queried.get_virtualSwitches():
         if virSw.get_id() == virSw2.get_id():
             self.assert_(
                 test_utils.is_timestamp_between(epoch_before, epoch_after,
                                                 virSw.get_createEpoch()))
         else:
             self.assert_(
                 test_utils.is_timestamp_between(
                     epoch_before, epoch_after,
                     virSw.get_lastModifiedEpoch()))
         for pg in virSw.get_portGroups():
             if pg.get_id() == portGrp2.get_id():
                 self.assert_(
                     test_utils.is_timestamp_between(
                         epoch_before, epoch_after, pg.get_createEpoch()))
             else:
                 self.assert_(
                     test_utils.is_timestamp_between(
                         epoch_before, epoch_after,
                         pg.get_lastModifiedEpoch()))
     # Check for deletedEpoch
     epoch_before = utils.get_current_epoch_ms()
     healthnmon_db_api.vm_host_delete_by_ids(self.admin_context,
                                             [vmhost_queried.get_id()])
     epoch_after = utils.get_current_epoch_ms()
     deleted_host = healthnmon_db_api.vm_host_get_all_by_filters(
         self.admin_context, {"id": vmhost_queried.get_id()}, None, None)[0]
     self.assertTrue(deleted_host.get_deleted())
     self.assert_(
         test_utils.is_timestamp_between(epoch_before, epoch_after,
                                         deleted_host.get_deletedEpoch()))
     deleted_switches = healthnmon_db_api.virtual_switch_get_all_by_filters(
         self.admin_context, {"id": (virSw1.get_id(), virSw2.get_id())},
         None, None)
     for deleted_switch in deleted_switches:
         self.assertTrue(deleted_switch.get_deleted())
         self.assert_(
             test_utils.is_timestamp_between(
                 epoch_before, epoch_after,
                 deleted_switch.get_deletedEpoch()))
         for deleted_portgrp in deleted_switch.get_portGroups():
             self.assertTrue(deleted_portgrp.get_deleted())
             self.assert_(
                 test_utils.is_timestamp_between(
                     epoch_before, epoch_after,
                     deleted_portgrp.get_deletedEpoch()))
Example #24
    def test_vmhost_save_modify_delete_with_vSwitch_pGroup(self):
        """Test case for filter deleted virtual switch and port group
        1. Create host with 2 virtual switch and port groups.
        2. Assert for the above point.
        3. Save VmHost by removing one virtual switch and one port group.
        4. Assert for deleted virtual switch and port group.
        5. Delete the host.
        6. Use filter_by api to assert for deleted host, virtual switch and port group.
        """
        "Test for  points 1 and 2"
        host_id = 'VH1'
        vmhost = VmHost()
        vmhost.id = host_id
        cost = self._create_cost()
        vSwitch1 = self._create_switch(host_id + '_vSwitch-01')
        vSwitch1.set_cost(cost)
        portGroup1 = self._create_port_group(host_id + '_pGroup-01')
        portGroup1.set_cost(cost)
        vSwitch1.add_portGroups(portGroup1)
        vmhost.add_virtualSwitches(vSwitch1)
        vmhost.add_portGroups(portGroup1)

        "Add the second vswitch and portgroup"
        vSwitch2 = self._create_switch(host_id + '_vSwitch-02')
        vSwitch2.set_cost(cost)
        portGroup2 = self._create_port_group(host_id + '_pGroup-02')
        portGroup2.set_cost(cost)
        vSwitch2.add_portGroups(portGroup2)
        vmhost.add_virtualSwitches(vSwitch2)
        vmhost.add_portGroups(portGroup2)

        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)
        vmhosts = healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                       [host_id])
        self.assertFalse(vmhosts is None,
                         'Host get by id returned a none list')
        self.assertTrue(
            len(vmhosts[0].get_virtualSwitches()) > 0,
            'Host get by virtual switch returned invalid number of list')
        vss = vmhosts[0].get_virtualSwitches()
        vs_ids = []
        for vs in vss:
            vs_ids.append(vs.get_id())
        self.assertTrue(
            vSwitch1.get_id() in vs_ids,
            "Added virtual switch1 does not appears in the host api")
        self.assertTrue(
            vSwitch2.get_id() in vs_ids,
            "Added virtual switch2 does not appears in the host api")
        pgs = vmhosts[0].get_portGroups()
        pg_ids = []
        for pg in pgs:
            pg_ids.append(pg.get_id())
        self.assertTrue(portGroup1.get_id() in pg_ids,
                        "Added port group1 does not appears in the host api")
        self.assertTrue(portGroup2.get_id() in pg_ids,
                        "Added port group2 does not appears in the host api")

        "Points 3 and 4 - Remove the second vswitch and portgroup from the vmhost and the save the vmhost"
        vmhost = VmHost()
        vmhost.id = host_id
        cost = self._create_cost()
        vSwitch = self._create_switch(host_id + '_vSwitch-01')
        vSwitch.set_cost(cost)
        portGroup = self._create_port_group(host_id + '_pGroup-01')
        portGroup.set_cost(cost)
        vSwitch.add_portGroups(portGroup)
        vmhost.add_virtualSwitches(vSwitch)
        vmhost.add_portGroups(portGroup)
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

        vmhosts = healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                       [host_id])
        self.assertFalse(vmhosts is None,
                         'Host get by id returned a none list')
        vss = vmhosts[0].get_virtualSwitches()
        vs_ids = []
        for vs in vss:
            vs_ids.append(vs.get_id())
        self.assertTrue(
            vSwitch.get_id() in vs_ids,
            "Modified virtual switch1 not appearing in the host api")
        self.assertTrue(vSwitch2.get_id() not in vs_ids,
                        "Deleted virtual switch2 appears in the host api")
        pgs = vmhosts[0].get_portGroups()
        pg_ids = []
        for pg in pgs:
            pg_ids.append(pg.get_id())
        self.assertTrue(portGroup.get_id() in pg_ids,
                        "Modified port group1 not appearing in the host api")
        self.assertTrue(portGroup2.get_id() not in pg_ids,
                        "Deleted port group2 appears in the host api")

        "Points 5 and 6 - Delete the host and assert for deletion using filter-by api"
        filters = {'id': host_id, 'deleted': 'true'}
        healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(), [host_id])
        del_vmhosts = healthnmon_db_api.vm_host_get_all_by_filters(
            get_admin_context(), filters, 'id', 'asc')
        self.assertFalse(del_vmhosts is None,
                         'Host get by filters returned a none list')
        vss = del_vmhosts[0].get_virtualSwitches()
        vs_ids = []
        for vs in vss:
            vs_ids.append(vs.get_id())
        self.assertTrue(
            vSwitch1.get_id() in vs_ids,
            "Deleted virtual switch1 not appearing in the host filter api")
        self.assertTrue(
            vSwitch2.get_id() in vs_ids,
            "Deleted virtual switch2 not appears in the host filter api")
        pgs = del_vmhosts[0].get_portGroups()
        pg_ids = []
        for pg in pgs:
            pg_ids.append(pg.get_id())
        self.assertTrue(
            portGroup1.get_id() in pg_ids,
            "Deleted port group1 not appearing in the host filter api")
        self.assertTrue(
            portGroup2.get_id() in pg_ids,
            "Deleted port group2 not appears in the host filter api")
Example #25
    def test_vm_host_delete(self):
        vmhost_id = 'VH1'
        vmhost = VmHost()
        vmhost.id = vmhost_id
        vSwitch = VirtualSwitch()
        vSwitch.set_id('vSwitch-01')
        vSwitch.set_name('vSwitch-01')
        vSwitch.set_resourceManagerId('rmId')
        vSwitch.set_switchType('vSwitch')

        cost1 = Cost()
        cost1.set_value(100)
        cost1.set_units('USD')
        vSwitch.set_cost(cost1)

        portGroup = PortGroup()
        portGroup.set_id('pg-01')
        portGroup.set_name('pg-01')
        portGroup.set_resourceManagerId('rmId')
        portGroup.set_type('portgroup_type')
        portGroup.set_cost(cost1)
        vSwitch.add_portGroups(portGroup)
        vmhost.add_virtualSwitches(vSwitch)
        vmhost.add_portGroups(portGroup)
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

        vmhost2 = VmHost()
        vmhost2.set_id('VH2')
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost2)

        storage = StorageVolume()
        storage.set_id('sv-01')
        storage.set_name('storage-01')
        storage.set_resourceManagerId('rmId')
        storage.set_size(1234)
        storage.set_free(2345)
        storage.set_vmfsVolume(True)
        storage.set_shared(True)
        storage.set_assignedServerCount(1)
        storage.set_volumeType('VMFS')
        storage.set_volumeId('101')

        hostMount1 = HostMountPoint()
        hostMount1.set_path('test_path1')
        hostMount1.set_vmHostId('VH1')
        storage.add_mountPoints(hostMount1)
        hostMount2 = HostMountPoint()
        hostMount2.set_path('test_path2')
        hostMount2.set_vmHostId('VH2')
        storage.add_mountPoints(hostMount2)
        healthnmon_db_api.storage_volume_save(get_admin_context(),
                                              storage)

        vm = Vm()
        vm.set_id('vm-01')
        vm.set_name('vm-01')
        vm.set_vmHostId('VH1')
        healthnmon_db_api.vm_save(get_admin_context(), vm)

        vmhosts = \
            healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                 [vmhost_id])
        self.assertFalse(vmhosts is None,
                         'host get by id returned a none list')
        self.assertTrue(len(vmhosts) > 0,
                        'host get by id returned invalid number of list'
                        )

        healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(),
                                                [vmhost_id])

        vmhosts = \
            healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                 [vmhost_id])
        self.assertTrue(vmhosts is None or len(vmhosts) == 0,
                        'host not deleted')
Example #26
    def test_vm_host_delete(self):
        vmhost_id = 'VH1'
        vmhost = VmHost()
        vmhost.id = vmhost_id
        vSwitch = VirtualSwitch()
        vSwitch.set_id('vSwitch-01')
        vSwitch.set_name('vSwitch-01')
        vSwitch.set_resourceManagerId('rmId')
        vSwitch.set_switchType('vSwitch')

        cost1 = Cost()
        cost1.set_value(100)
        cost1.set_units('USD')
        vSwitch.set_cost(cost1)

        portGroup = PortGroup()
        portGroup.set_id('pg-01')
        portGroup.set_name('pg-01')
        portGroup.set_resourceManagerId('rmId')
        portGroup.set_type('portgroup_type')
        portGroup.set_cost(cost1)
        vSwitch.add_portGroups(portGroup)
        vmhost.add_virtualSwitches(vSwitch)
        vmhost.add_portGroups(portGroup)
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost)

        vmhost2 = VmHost()
        vmhost2.set_id('VH2')
        healthnmon_db_api.vm_host_save(get_admin_context(), vmhost2)

        storage = StorageVolume()
        storage.set_id('sv-01')
        storage.set_name('storage-01')
        storage.set_resourceManagerId('rmId')
        storage.set_size(1234)
        storage.set_free(2345)
        storage.set_vmfsVolume(True)
        storage.set_shared(True)
        storage.set_assignedServerCount(1)
        storage.set_volumeType('VMFS')
        storage.set_volumeId('101')

        hostMount1 = HostMountPoint()
        hostMount1.set_path('test_path1')
        hostMount1.set_vmHostId('VH1')
        storage.add_mountPoints(hostMount1)
        hostMount2 = HostMountPoint()
        hostMount2.set_path('test_path2')
        hostMount2.set_vmHostId('VH2')
        storage.add_mountPoints(hostMount2)
        healthnmon_db_api.storage_volume_save(get_admin_context(), storage)

        vm = Vm()
        vm.set_id('vm-01')
        vm.set_name('vm-01')
        vm.set_vmHostId('VH1')
        healthnmon_db_api.vm_save(get_admin_context(), vm)

        vmhosts = \
            healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                 [vmhost_id])
        self.assertFalse(vmhosts is None,
                         'host get by id returned a none list')
        self.assertTrue(
            len(vmhosts) > 0, 'host get by id returned invalid number of list')

        #        self.assertRaises(Exception, healthnmon_db_api.vm_host_delete_by_ids,([vmhost_id]))

        healthnmon_db_api.vm_host_delete_by_ids(get_admin_context(),
                                                [vmhost_id])

        vmhosts = \
            healthnmon_db_api.vm_host_get_by_ids(get_admin_context(),
                                                 [vmhost_id])
        self.assertTrue(vmhosts is None or len(vmhosts) == 0,
                        'host not deleted')