def test_boot_servers_with_affinity_overquota(self):
    """Tests that we check server group member quotas and cleanup created
    resources when we fail with OverQuota.
    """
    # Allow only a single member per server group.
    self.flags(quota_server_group_members=1)
    # make sure we start with 0 servers
    servers = self.api.get_servers(detail=False)
    self.assertEqual(0, len(servers))
    created_group = self.api.post_server_groups(self.affinity)
    # Booting two members into a one-member group must fail with 403
    # (OverQuota surfaces as Forbidden through the API).
    ex = self.assertRaises(client.OpenStackApiException,
                           self._boot_servers_to_group,
                           created_group)
    self.assertEqual(403, ex.response.status_code)
    # _boot_servers_to_group creates 2 instances in the group in order, not
    # multiple servers in a single request. Since our quota is 1, the first
    # server create would pass, the second should fail, and we should be
    # left with 1 server and its 1 block device mapping.
    servers = self.api.get_servers(detail=False)
    self.assertEqual(1, len(servers))
    # Cross-check directly against the database, not just the API view.
    ctxt = context.get_admin_context()
    servers = db.instance_get_all(ctxt)
    self.assertEqual(1, len(servers))
    ctxt_mgr = db_api.get_context_manager(ctxt)
    with ctxt_mgr.reader.using(ctxt):
        # The failed second boot must not have leaked a BDM row: exactly
        # one mapping should remain, belonging to the surviving instance.
        bdms = db_api._block_device_mapping_get_query(ctxt).all()
    self.assertEqual(1, len(bdms))
    self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])
def _create_uuid(context, bdm_id):
    """Generate and persist a uuid for the BDM row with the given id.

    Returns the uuid now stored on the row: either the one generated
    here, or — if a concurrent writer won the race — the uuid that
    writer already assigned.

    :param context: request context providing the DB session
    :param bdm_id: primary key of the BlockDeviceMapping row to update
    :returns: the (non-None) uuid recorded on the row
    """
    # NOTE(mdbooth): This method is only required until uuid is made
    # non-nullable in a future release.

    # NOTE(mdbooth): We wrap this method in a retry loop because it can
    # fail (safely) on multi-master galera if concurrent updates happen on
    # different masters. It will never fail on single-master. We can only
    # ever need one retry.

    uuid = uuidutils.generate_uuid()
    values = {'uuid': uuid}
    # Compare-row: only matches while the row's uuid is still NULL, so a
    # row that has already been assigned a uuid cannot be overwritten.
    compare = db_models.BlockDeviceMapping(id=bdm_id, uuid=None)

    # NOTE(mdbooth): We explicitly use an independent transaction context
    # here so as not to fail if:
    # 1. We retry.
    # 2. We're in a read transaction. This is an edge case of what's
    #    normally a read operation. Forcing everything (transitively)
    #    which reads a BDM to be in a write transaction for a narrow
    #    temporary edge case is undesirable.
    tctxt = db_api.get_context_manager(context).writer.independent
    with tctxt.using(context):
        query = context.session.query(db_models.BlockDeviceMapping).\
                    filter_by(id=bdm_id)

        try:
            query.update_on_match(compare, 'id', values)
        except update_match.NoRowsMatched:
            # We can only get here if we raced, and another writer already
            # gave this bdm a uuid
            result = query.one()
            uuid = result['uuid']
            assert (uuid is not None)

    return uuid
def _create_uuid(context, bdm_id):
    """Backfill a uuid onto the BlockDeviceMapping row identified by bdm_id.

    Returns whichever uuid ends up on the row: the one generated here,
    or the one a concurrent writer assigned first.
    """
    # NOTE(mdbooth): This method is only required until uuid is made
    # non-nullable in a future release.

    # NOTE(mdbooth): We wrap this method in a retry loop because it can
    # fail (safely) on multi-master galera if concurrent updates happen on
    # different masters. It will never fail on single-master. We can only
    # ever need one retry.

    generated = uuidutils.generate_uuid()

    # The compare-row constrains the update to rows whose uuid is still
    # NULL, so we never clobber a uuid written by somebody else.
    match = db_models.BlockDeviceMapping(id=bdm_id, uuid=None)

    # NOTE(mdbooth): We explicitly use an independent transaction context
    # here so as not to fail if:
    # 1. We retry.
    # 2. We're in a read transaction. This is an edge case of what's
    #    normally a read operation. Forcing everything (transitively)
    #    which reads a BDM to be in a write transaction for a narrow
    #    temporary edge case is undesirable.
    independent_writer = db_api.get_context_manager(context).writer.independent
    with independent_writer.using(context):
        bdm_query = context.session.query(
            db_models.BlockDeviceMapping).filter_by(id=bdm_id)

        try:
            bdm_query.update_on_match(match, 'id', {'uuid': generated})
        except update_match.NoRowsMatched:
            # Another writer raced us and already gave this bdm a uuid;
            # read back the value it wrote.
            generated = bdm_query.one()['uuid']
            assert generated is not None

    return generated
# Ad-hoc reproducer script: loads Nova config, exercises the compute-node
# DB/object paths, and finishes by touching an unset lazy-loadable field.
import logging  # was missing: LOG = logging.getLogger(...) raised NameError

import nova.conf
from nova import config
from nova import objects
from nova import context
from nova import db
from nova.db.sqlalchemy import models

LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF

# Parse an empty argv against the standard config file so CONF is usable.
argv = []
default_config_files = ['/etc/nova/nova.conf']
config.parse_args(argv, default_config_files=default_config_files)
objects.register_all()

# NOTE(review): this rebinding shadows the `nova.context` module imported
# above with an admin RequestContext instance — apparently deliberate here,
# but the module is unreachable afterwards.
context = context.get_admin_context()
nodes = objects.ComputeNodeList.get_all(context)
# Bare expression: forces evaluation of the 'deleted' field on every node
# (result intentionally discarded).
[it['deleted'] for it in nodes]

import nova.db.sqlalchemy.api as api
cxt = api.get_context_manager(context)
t = cxt.writer.using(context)
# NOTE(review): the writer context is entered manually and never exited
# (no t.__exit__ / no `with`); the session is rolled back and closed by
# hand instead — presumably to observe that exact sequence. Confirm this
# is intentional before reusing.
s = t.__enter__()
s.query(models.ComputeNode).first()
s.rollback()
s.close()

# Accessing memory_mb_used on a freshly created ComputeNode object triggers
# the lazy-load path for an unset field (result discarded).
cn = objects.ComputeNode(context)
cn.memory_mb_used