def test_existing_consumer_after_gen_fail(self):
    """Verify that, at or beyond the consumer-generation microversion, an
    existing consumer whose stored generation differs from the supplied
    one causes ensure_consumer() to raise a 409 Conflict.
    """
    # Wire the mocks so the project, user and consumer all appear to
    # already exist in the database.
    project = project_obj.Project(
        self.ctx, id=1, external_id=self.project_id)
    user = user_obj.User(self.ctx, id=1, external_id=self.user_id)
    existing = consumer_obj.Consumer(
        self.ctx, id=1, project=project, user=user, generation=42)
    self.mock_project_get.return_value = project
    self.mock_user_get.return_value = user
    self.mock_consumer_get.return_value = existing

    # The supplied generation must NOT be ignored at this microversion,
    # and 2 does not match the stored generation of 42, so we expect a
    # conflict.
    supplied_gen = 2
    self.assertRaises(
        webob.exc.HTTPConflict,
        util.ensure_consumer,
        self.ctx, self.consumer_id, self.project_id, self.user_id,
        supplied_gen, self.after_version)
def allocate_from_provider(self, rp, rc, used, consumer_id=None,
                           consumer=None):
    """Create (replacing any prior ones for this consumer) a single
    allocation of ``used`` units of resource class ``rc`` against
    provider ``rp``, and return the resulting AllocationList.

    :param rp: The resource provider to allocate from.
    :param rc: The resource class name of the allocation.
    :param used: The amount of the resource class consumed.
    :param consumer_id: Optional consumer UUID; a random one is generated
        when not supplied.
    :param consumer: Optional pre-built Consumer object; looked up or
        created from ``consumer_id`` when not supplied.
    """
    # NOTE(efried): If not specified, use a random consumer UUID - we
    # don't want to override any existing allocations from the test case.
    if not consumer_id:
        consumer_id = uuidutils.generate_uuid()
    if consumer is None:
        # Reuse an existing consumer record when one exists; otherwise
        # create a fresh one owned by the fixture's user/project.
        try:
            consumer = consumer_obj.Consumer.get_by_uuid(
                self.ctx, consumer_id)
        except exception.NotFound:
            consumer = consumer_obj.Consumer(
                self.ctx, uuid=consumer_id, user=self.user_obj,
                project=self.project_obj)
            consumer.create()
    allocation = rp_obj.Allocation(
        self.ctx, resource_provider=rp, resource_class=rc,
        consumer=consumer, used=used)
    alloc_list = rp_obj.AllocationList(self.ctx, objects=[allocation])
    alloc_list.replace_all()
    return alloc_list
def _make_allocation(self, inv_dict, alloc_dict):
    """Create a provider carrying the inventory described by ``inv_dict``
    and record one allocation described by ``alloc_dict`` against it.

    Returns a (resource_provider, allocation) tuple.
    """
    rp = self._create_provider('allocation_resource_provider')
    # Install the requested inventory on the new provider.
    disk_inv = rp_obj.Inventory(
        context=self.ctx, resource_provider=rp, **inv_dict)
    rp.set_inventory(rp_obj.InventoryList(objects=[disk_inv]))

    # Fetch-or-create the consumer named in the allocation dict.
    # NOTE(review): 'consumer_id' is intentionally left inside alloc_dict
    # and forwarded to Allocation below, matching existing behavior.
    consumer_id = alloc_dict['consumer_id']
    try:
        c = consumer_obj.Consumer.get_by_uuid(self.ctx, consumer_id)
    except exception.NotFound:
        c = consumer_obj.Consumer(
            self.ctx, uuid=consumer_id, user=self.user_obj,
            project=self.project_obj)
        c.create()

    alloc = rp_obj.Allocation(
        self.ctx, resource_provider=rp, consumer=c, **alloc_dict)
    rp_obj.AllocationList(self.ctx, objects=[alloc]).replace_all()
    return rp, alloc
def test_existing_consumer_after_gen_matches_supplied_gen(self):
    """Verify that, at or beyond the consumer-generation microversion, an
    existing consumer is accepted — and no new records are created — when
    the caller supplies a generation matching the stored one.
    """
    # Wire the mocks so the project, user and consumer all appear to
    # already exist in the database.
    project = project_obj.Project(
        self.ctx, id=1, external_id=self.project_id)
    user = user_obj.User(self.ctx, id=1, external_id=self.user_id)
    existing = consumer_obj.Consumer(
        self.ctx, id=1, project=project, user=user, generation=2)
    self.mock_project_get.return_value = project
    self.mock_user_get.return_value = user
    self.mock_consumer_get.return_value = existing

    # The supplied generation must NOT be ignored at this microversion,
    # and it matches the stored generation, so this call must succeed.
    supplied_gen = 2
    util.ensure_consumer(
        self.ctx, self.consumer_id, self.project_id, self.user_id,
        supplied_gen, self.after_version)

    # Everything already existed, so nothing should have been created.
    self.mock_project_create.assert_not_called()
    self.mock_user_create.assert_not_called()
    self.mock_consumer_create.assert_not_called()
def start_fixture(self):
    """Populate the placement database with the users, project, provider,
    consumers and allocations the allocation gabbi tests expect, and
    export the generated identifiers via environment variables for the
    gabbits to reference.
    """
    super(AllocationFixture, self).start_fixture()
    self.context = context.get_admin_context()

    # For use creating and querying allocations/usages
    os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
    project_id = os.environ['PROJECT_ID']
    user_id = os.environ['USER_ID']
    alt_user_id = os.environ['ALT_USER_ID']

    # Create the two users and the project that will own the consumers
    # created below.
    user = user_obj.User(self.context, external_id=user_id)
    user.create()
    alt_user = user_obj.User(self.context, external_id=alt_user_id)
    alt_user.create()
    project = project_obj.Project(self.context, external_id=project_id)
    project.create()

    # Stealing from the super
    rp_name = os.environ['RP_NAME']
    rp_uuid = os.environ['RP_UUID']
    rp = rp_obj.ResourceProvider(
        self.context, name=rp_name, uuid=rp_uuid)
    rp.create()

    # Create a first consumer for the DISK_GB
    consumer_id = uuidutils.generate_uuid()
    consumer = consumer_obj.Consumer(
        self.context, uuid=consumer_id, user=user, project=project)
    consumer.create()

    # Create some DISK_GB inventory and allocations.
    inventory = rp_obj.Inventory(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', total=2048,
        step_size=10, min_unit=10, max_unit=600)
    inventory.obj_set_defaults()
    rp.add_inventory(inventory)
    alloc1 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', consumer=consumer, used=500)
    alloc2 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', consumer=consumer, used=500)
    alloc_list = rp_obj.AllocationList(
        self.context, objects=[alloc1, alloc2])
    alloc_list.create_all()

    # Create a second consumer for the VCPU
    consumer_id = uuidutils.generate_uuid()
    consumer = consumer_obj.Consumer(
        self.context, uuid=consumer_id, user=user, project=project)
    consumer.create()
    # This consumer is referenced from the gabbits
    os.environ['CONSUMER_ID'] = consumer_id

    # Create some VCPU inventory and allocations.
    inventory = rp_obj.Inventory(
        self.context, resource_provider=rp,
        resource_class='VCPU', total=10, max_unit=4)
    inventory.obj_set_defaults()
    rp.add_inventory(inventory)
    alloc1 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='VCPU', consumer=consumer, used=2)
    alloc2 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='VCPU', consumer=consumer, used=4)
    alloc_list = rp_obj.AllocationList(
        self.context, objects=[alloc1, alloc2])
    alloc_list.create_all()

    # Create a consumer object for a different user
    alt_consumer_id = uuidutils.generate_uuid()
    alt_consumer = consumer_obj.Consumer(
        self.context, uuid=alt_consumer_id, user=alt_user,
        project=project)
    alt_consumer.create()
    os.environ['ALT_CONSUMER_ID'] = alt_consumer_id

    # Create a couple of allocations for a different user.
    alloc1 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', consumer=alt_consumer, used=20)
    alloc2 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='VCPU', consumer=alt_consumer, used=1)
    alloc_list = rp_obj.AllocationList(
        self.context, objects=[alloc1, alloc2])
    alloc_list.create_all()

    # The ALT_RP_XXX variables are for a resource provider that has
    # not been created in the Allocation fixture
    os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
    os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
def test_reshape_concurrent_inventory_update(self):
    """Valid failure scenario for reshape(). We test a situation where the
    virt driver has constructed its "after inventories and allocations"
    and sent those to the POST /reshape endpoint. The reshape POST handler
    does a quick check of the resource provider generations sent in the
    payload and they all check out.

    However, right before the call to resource_provider.reshape(), another
    thread legitimately changes the inventory of one of the providers
    involved in the reshape transaction. We should get a
    ConcurrentUpdateDetected in this case.
    """
    # First create our consumers
    i1_uuid = uuids.instance1
    i1_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i1_uuid, user=self.user_obj,
        project=self.project_obj)
    i1_consumer.create()

    # then all our original providers
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, 'VCPU', 16)
    tb.add_inventory(cn1, 'MEMORY_MB', 32768)
    tb.add_inventory(cn1, 'DISK_GB', 1000)

    # Allocate an instance on our compute node
    allocs = [
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='VCPU', consumer=i1_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='MEMORY_MB', consumer=i1_consumer, used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='DISK_GB', consumer=i1_consumer, used=100),
    ]
    alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
    alloc_list.replace_all()

    # Before we issue the actual reshape() call, we need to first create
    # the child providers and sharing storage provider. These are actions
    # that the virt driver or external agent is responsible for performing
    # *before* attempting any reshape activity.
    cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
    cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
    ss = self._create_provider('ss')

    # OK, now emulate the call to POST /reshaper that will be triggered by
    # a virt driver wanting to replace the world and change its modeling
    # from a single provider to a nested provider tree along with a
    # sharing storage provider.
    after_inventories = {
        # cn1 keeps the RAM only
        cn1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', total=32768, reserved=0,
                max_unit=32768, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # each NUMA node gets half of the CPUs
        cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa0,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa1,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # The sharing provider gets a bunch of disk
        ss: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=ss,
                resource_class='DISK_GB', total=100000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
    }
    # We do a fetch from the DB for each instance to get its latest
    # generation. This would be done by the resource tracker or scheduler
    # report client before issuing the call to reshape() because the
    # consumers representing the two instances above will have had their
    # generations incremented in the original call to PUT
    # /allocations/{consumer_uuid}
    i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)

    after_allocs = rp_obj.AllocationList(self.ctx, objects=[
        # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1_numa0, resource_class='VCPU',
            consumer=i1_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i1_consumer, used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=ss, resource_class='DISK_GB',
            consumer=i1_consumer, used=100),
    ])

    # OK, now before we call reshape(), here we emulate another thread
    # changing the inventory for the sharing storage provider in between
    # the time in the REST handler when the sharing storage provider's
    # generation was validated and the actual call to reshape()
    ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
    # Reduce the amount of storage to 2000, from 100000.
    new_ss_inv = rp_obj.InventoryList(self.ctx, objects=[
        rp_obj.Inventory(
            self.ctx, resource_provider=ss_threadB,
            resource_class='DISK_GB', total=2000, reserved=0,
            max_unit=1000, min_unit=1, step_size=1,
            allocation_ratio=1.0)])
    ss_threadB.set_inventory(new_ss_inv)
    # Double check our storage provider's generation is now greater than
    # the original storage provider record being sent to reshape()
    self.assertGreater(ss_threadB.generation, ss.generation)

    # And we should legitimately get a failure now to reshape() due to
    # another thread updating one of the involved provider's generations
    self.assertRaises(
        exception.ConcurrentUpdateDetected,
        rp_obj.reshape, self.ctx, after_inventories, after_allocs)
def test_reshape(self):
    """We set up the following scenario:

    BEFORE: single compute node setup

      A single compute node with:
        - VCPU, MEMORY_MB, DISK_GB inventory
        - Two instances consuming CPU, RAM and DISK from that compute node

    AFTER: hierarchical + shared storage setup

      A compute node parent provider with:
        - MEMORY_MB
      Two NUMA node child providers containing:
        - VCPU
      Shared storage provider with:
        - DISK_GB

    Both instances have their resources split among the providers and
    shared storage accordingly
    """
    # First create our consumers
    i1_uuid = uuids.instance1
    i1_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i1_uuid, user=self.user_obj,
        project=self.project_obj)
    i1_consumer.create()
    i2_uuid = uuids.instance2
    i2_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i2_uuid, user=self.user_obj,
        project=self.project_obj)
    i2_consumer.create()

    # The BEFORE scenario: a single provider with all three resources.
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, 'VCPU', 16)
    tb.add_inventory(cn1, 'MEMORY_MB', 32768)
    tb.add_inventory(cn1, 'DISK_GB', 1000)

    # Allocate both instances against the single compute node
    for consumer in (i1_consumer, i2_consumer):
        allocs = [
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='VCPU',
                consumer=consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', consumer=consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='DISK_GB',
                consumer=consumer, used=100),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

    # Verify we have the allocations we expect for the BEFORE scenario
    before_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i1_uuid)
    self.assertEqual(3, len(before_allocs_i1))
    self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
    before_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i2_uuid)
    self.assertEqual(3, len(before_allocs_i2))
    self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)

    # Before we issue the actual reshape() call, we need to first create
    # the child providers and sharing storage provider. These are actions
    # that the virt driver or external agent is responsible for performing
    # *before* attempting any reshape activity.
    cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
    cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
    ss = self._create_provider('ss')

    # OK, now emulate the call to POST /reshaper that will be triggered by
    # a virt driver wanting to replace the world and change its modeling
    # from a single provider to a nested provider tree along with a
    # sharing storage provider.
    after_inventories = {
        # cn1 keeps the RAM only
        cn1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', total=32768, reserved=0,
                max_unit=32768, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # each NUMA node gets half of the CPUs
        cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa0,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa1,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # The sharing provider gets a bunch of disk
        ss: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=ss,
                resource_class='DISK_GB', total=100000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
    }
    # We do a fetch from the DB for each instance to get its latest
    # generation. This would be done by the resource tracker or scheduler
    # report client before issuing the call to reshape() because the
    # consumers representing the two instances above will have had their
    # generations incremented in the original call to PUT
    # /allocations/{consumer_uuid}
    i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
    i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)

    after_allocs = rp_obj.AllocationList(self.ctx, objects=[
        # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1_numa0, resource_class='VCPU',
            consumer=i1_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i1_consumer, used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=ss, resource_class='DISK_GB',
            consumer=i1_consumer, used=100),
        # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1_numa1, resource_class='VCPU',
            consumer=i2_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i2_consumer, used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=ss, resource_class='DISK_GB',
            consumer=i2_consumer, used=100),
    ])
    rp_obj.reshape(self.ctx, after_inventories, after_allocs)

    # Verify that the inventories have been moved to the appropriate
    # providers in the AFTER scenario

    # The root compute node should only have MEMORY_MB, nothing else
    cn1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, cn1)
    self.assertEqual(1, len(cn1_inv))
    self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
    self.assertEqual(32768, cn1_inv[0].total)
    # Each NUMA node should only have half the original VCPU, nothing else
    numa0_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, cn1_numa0)
    self.assertEqual(1, len(numa0_inv))
    self.assertEqual('VCPU', numa0_inv[0].resource_class)
    self.assertEqual(8, numa0_inv[0].total)
    numa1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, cn1_numa1)
    self.assertEqual(1, len(numa1_inv))
    self.assertEqual('VCPU', numa1_inv[0].resource_class)
    self.assertEqual(8, numa1_inv[0].total)
    # The sharing storage provider should only have DISK_GB, nothing else
    ss_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, ss)
    self.assertEqual(1, len(ss_inv))
    self.assertEqual('DISK_GB', ss_inv[0].resource_class)
    self.assertEqual(100000, ss_inv[0].total)

    # Verify we have the allocations we expect for the AFTER scenario
    after_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i1_uuid)
    self.assertEqual(3, len(after_allocs_i1))
    # Our VCPU allocation should be in the NUMA0 node
    vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

    after_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i2_uuid)
    self.assertEqual(3, len(after_allocs_i2))
    # Our VCPU allocation should be in the NUMA1 node
    vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
def ensure_consumer(ctx, consumer_uuid, project_id, user_id,
                    consumer_generation, want_version):
    """Ensures there are records in the consumers, projects and users table for
    the supplied external identifiers.

    Returns a tuple containing the populated Consumer object containing Project
    and User sub-objects and a boolean indicating whether a new Consumer object
    was created (as opposed to an existing consumer record retrieved)

    :note: If the supplied project or user external identifiers do not match an
           existing consumer's project and user identifiers, the existing
           consumer's project and user IDs are updated to reflect the supplied
           ones.

    :param ctx: The request context.
    :param consumer_uuid: The uuid of the consumer of the resources.
    :param project_id: The external ID of the project consuming the resources.
    :param user_id: The external ID of the user consuming the resources.
    :param consumer_generation: The generation provided by the user for this
        consumer.
    :param want_version: the microversion matcher.
    :raises webob.exc.HTTPConflict if consumer generation is required and there
            was a mismatch
    """
    created_new_consumer = False
    # Consumer generations are only enforced at microversion 1.28 or higher.
    requires_consumer_generation = want_version.matches((1, 28))
    if project_id is None:
        # Fall back to the configured "incomplete consumer" placeholders when
        # no project was supplied.
        project_id = CONF.placement.incomplete_consumer_project_id
        user_id = CONF.placement.incomplete_consumer_user_id
    try:
        proj = project_obj.Project.get_by_external_id(ctx, project_id)
    except exception.NotFound:
        # Auto-create the project if we found no record of it...
        try:
            proj = project_obj.Project(ctx, external_id=project_id)
            proj.create()
        except exception.ProjectExists:
            # No worries, another thread created this project already
            proj = project_obj.Project.get_by_external_id(ctx, project_id)
    try:
        user = user_obj.User.get_by_external_id(ctx, user_id)
    except exception.NotFound:
        # Auto-create the user if we found no record of it...
        try:
            user = user_obj.User(ctx, external_id=user_id)
            user.create()
        except exception.UserExists:
            # No worries, another thread created this user already
            user = user_obj.User.get_by_external_id(ctx, user_id)

    try:
        consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
        if requires_consumer_generation:
            # An existing consumer: the caller must have supplied the
            # consumer's current generation.
            if consumer.generation != consumer_generation:
                raise webob.exc.HTTPConflict(
                    _('consumer generation conflict - '
                      'expected %(expected_gen)s but got %(got_gen)s') %
                    {
                        'expected_gen': consumer.generation,
                        'got_gen': consumer_generation,
                    },
                    comment=errors.CONCURRENT_UPDATE)
        # NOTE(jaypipes): The user may have specified a different project and
        # user external ID than the one that we had for the consumer. If this
        # is the case, go ahead and modify the consumer record with the
        # newly-supplied project/user information, but do not bump the consumer
        # generation (since it will be bumped in the
        # AllocationList.replace_all() method).
        #
        # TODO(jaypipes): This means that there may be a partial update.
        # Imagine a scenario where a user calls POST /allocations, and the
        # payload references two consumers. The first consumer is a new
        # consumer and is auto-created. The second consumer is an existing
        # consumer, but contains a different project or user ID than the
        # existing consumer's record. If the eventual call to
        # AllocationList.replace_all() fails for whatever reason (say, a
        # resource provider generation conflict or out of resources failure),
        # we will end up deleting the auto-created consumer but we MAY not undo
        # the changes to the second consumer's project and user ID. I say MAY
        # and not WILL NOT because I'm not sure that the exception that gets
        # raised from AllocationList.replace_all() will cause the context
        # manager's transaction to rollback automatically. I believe that the
        # same transaction context is used for both util.ensure_consumer() and
        # AllocationList.replace_all() within the same HTTP request, but need
        # to test this to be 100% certain...
        if (project_id != consumer.project.external_id or
                user_id != consumer.user.external_id):
            LOG.debug("Supplied project or user ID for consumer %s was "
                      "different than existing record. Updating consumer "
                      "record.", consumer_uuid)
            consumer.project = proj
            consumer.user = user
            consumer.update()
    except exception.NotFound:
        # If we are attempting to modify or create allocations at microversion
        # 1.28 or higher, we need a consumer generation specified. The user
        # must have specified None for the consumer generation if we get here,
        # since there was no existing consumer with this UUID and therefore
        # the user should be indicating that they expect the consumer did not
        # exist.
        if requires_consumer_generation:
            if consumer_generation is not None:
                raise webob.exc.HTTPConflict(
                    _('consumer generation conflict - '
                      'expected null but got %s') % consumer_generation,
                    comment=errors.CONCURRENT_UPDATE)
        # No such consumer. This is common for new allocations. Create the
        # consumer record
        try:
            consumer = consumer_obj.Consumer(
                ctx, uuid=consumer_uuid, project=proj, user=user)
            consumer.create()
            created_new_consumer = True
        except exception.ConsumerExists:
            # No worries, another thread created this consumer already
            consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
    return consumer, created_new_consumer
def test_delete_consumer_if_no_allocs(self):
    """AllocationList.replace_all() should attempt to delete consumers that
    no longer have any allocations. Due to the REST API not having any way
    to query for consumers directly (only via the GET
    /allocations/{consumer_uuid} endpoint which returns an empty dict even
    when no consumer record exists for the {consumer_uuid}) we need to do
    this functional test using only the object layer.
    """
    # We will use two consumers in this test, only one of which will get
    # all of its allocations deleted in a transaction (and we expect that
    # consumer record to be deleted)
    c1 = consumer_obj.Consumer(
        self.ctx, uuid=uuids.consumer1, user=self.user_obj,
        project=self.project_obj)
    c1.create()
    c2 = consumer_obj.Consumer(
        self.ctx, uuid=uuids.consumer2, user=self.user_obj,
        project=self.project_obj)
    c2.create()

    # Create some inventory that we will allocate
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8)
    tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
    tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)

    # Now allocate some of that inventory to two different consumers
    allocs = [
        rp_obj.Allocation(
            self.ctx, consumer=c1, resource_provider=cn1,
            resource_class=fields.ResourceClass.VCPU, used=1),
        rp_obj.Allocation(
            self.ctx, consumer=c1, resource_provider=cn1,
            resource_class=fields.ResourceClass.MEMORY_MB, used=512),
        rp_obj.Allocation(
            self.ctx, consumer=c2, resource_provider=cn1,
            resource_class=fields.ResourceClass.VCPU, used=1),
        rp_obj.Allocation(
            self.ctx, consumer=c2, resource_provider=cn1,
            resource_class=fields.ResourceClass.MEMORY_MB, used=512),
    ]
    alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
    alloc_list.replace_all()

    # Validate that we have consumer records for both consumers
    for c_uuid in (uuids.consumer1, uuids.consumer2):
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
        self.assertIsNotNone(c_obj)

    # OK, now "remove" the allocation for consumer2 by setting the used
    # value for both allocated resources to 0 and re-running the
    # AllocationList.replace_all(). This should end up deleting the
    # consumer record for consumer2
    allocs = [
        rp_obj.Allocation(
            self.ctx, consumer=c2, resource_provider=cn1,
            resource_class=fields.ResourceClass.VCPU, used=0),
        rp_obj.Allocation(
            self.ctx, consumer=c2, resource_provider=cn1,
            resource_class=fields.ResourceClass.MEMORY_MB, used=0),
    ]
    alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
    alloc_list.replace_all()

    # consumer1 should still exist...
    c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
    self.assertIsNotNone(c_obj)

    # but not consumer2...
    self.assertRaises(
        exception.NotFound, consumer_obj.Consumer.get_by_uuid,
        self.ctx, uuids.consumer2)

    # DELETE /allocations/{consumer_uuid} is the other place where we
    # delete all allocations for a consumer. Let's delete all for consumer1
    # and check that the consumer record is deleted
    alloc_list = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, uuids.consumer1)
    alloc_list.delete_all()

    # consumer1 should no longer exist in the DB since we just deleted all
    # of its allocations
    self.assertRaises(
        exception.NotFound, consumer_obj.Consumer.get_by_uuid,
        self.ctx, uuids.consumer1)
def ensure_consumer(ctx, consumer_uuid, project_id, user_id,
                    consumer_generation, want_version):
    """Ensures there are records in the consumers, projects and users table for
    the supplied external identifiers.

    Returns a populated Consumer object containing Project and User sub-objects

    :param ctx: The request context.
    :param consumer_uuid: The uuid of the consumer of the resources.
    :param project_id: The external ID of the project consuming the resources.
    :param user_id: The external ID of the user consuming the resources.
    :param consumer_generation: The generation provided by the user for this
        consumer.
    :param want_version: the microversion matcher.
    :raises webob.exc.HTTPConflict if consumer generation is required and there
            was a mismatch
    """
    # Consumer generations are only enforced at microversion 1.28 or higher.
    requires_consumer_generation = want_version.matches((1, 28))
    if project_id is None:
        # Fall back to the configured "incomplete consumer" placeholders when
        # no project was supplied.
        project_id = CONF.placement.incomplete_consumer_project_id
        user_id = CONF.placement.incomplete_consumer_user_id
    try:
        proj = project_obj.Project.get_by_external_id(ctx, project_id)
    except exception.NotFound:
        # Auto-create the project if we found no record of it...
        try:
            proj = project_obj.Project(ctx, external_id=project_id)
            proj.create()
        except exception.ProjectExists:
            # No worries, another thread created this project already
            proj = project_obj.Project.get_by_external_id(ctx, project_id)
    try:
        user = user_obj.User.get_by_external_id(ctx, user_id)
    except exception.NotFound:
        # Auto-create the user if we found no record of it...
        try:
            user = user_obj.User(ctx, external_id=user_id)
            user.create()
        except exception.UserExists:
            # No worries, another thread created this user already
            user = user_obj.User.get_by_external_id(ctx, user_id)

    try:
        consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
        if requires_consumer_generation:
            # An existing consumer: the caller must have supplied the
            # consumer's current generation.
            if consumer.generation != consumer_generation:
                raise webob.exc.HTTPConflict(
                    _('consumer generation conflict - '
                      'expected %(expected_gen)s but got %(got_gen)s') %
                    {
                        'expected_gen': consumer.generation,
                        'got_gen': consumer_generation,
                    })
    except exception.NotFound:
        # If we are attempting to modify or create allocations at microversion
        # 1.28 or higher, we need a consumer generation specified. The user
        # must have specified None for the consumer generation if we get here,
        # since there was no existing consumer with this UUID and therefore
        # the user should be indicating that they expect the consumer did not
        # exist.
        if requires_consumer_generation:
            if consumer_generation is not None:
                raise webob.exc.HTTPConflict(
                    _('consumer generation conflict - '
                      'expected null but got %s') % consumer_generation)
        # No such consumer. This is common for new allocations. Create the
        # consumer record
        try:
            consumer = consumer_obj.Consumer(
                ctx, uuid=consumer_uuid, project=proj, user=user)
            consumer.create()
        except exception.ConsumerExists:
            # No worries, another thread created this consumer already
            consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
    return consumer