def test_create_and_clear(self):
    """Test that a used of 0 in an allocation wipes allocations."""
    consumer_uuid = uuidsentinel.consumer

    # A consumer standing in for the instance.
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=consumer_uuid, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()

    rp_class = orc.DISK_GB
    target_rp = self._make_rp_and_inventory(
        resource_class=rp_class, max_unit=500)

    def _alloc(rp, consumer, used):
        # Local factory to cut down on repetition below.
        return alloc_obj.Allocation(
            resource_provider=rp, consumer=consumer,
            resource_class=rp_class, used=used)

    # Write two allocations with non-zero values and confirm the
    # resulting usage is their sum.
    alloc_obj.replace_all(self.ctx, [
        _alloc(target_rp, inst_consumer, 100),
        _alloc(target_rp, inst_consumer, 200),
    ])

    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(2, len(allocations))
    self.assertEqual(300, sum(alloc.used for alloc in allocations))

    # Rewrite with two allocations, one of them used=0; only the
    # non-zero one should survive.
    alloc_obj.replace_all(self.ctx, [
        _alloc(target_rp, inst_consumer, 0),
        _alloc(target_rp, inst_consumer, 200),
    ])

    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(1, len(allocations))
    self.assertEqual(200, allocations[0].used)

    # Add a source resource provider and a migration consumer.
    migration_uuid = uuidsentinel.migration

    # A consumer standing in for the migration.
    mig_consumer = consumer_obj.Consumer(
        self.ctx, uuid=migration_uuid, user=self.user_obj,
        project=self.project_obj)
    mig_consumer.create()

    source_rp = self._make_rp_and_inventory(
        rp_name=uuidsentinel.source_name,
        rp_uuid=uuidsentinel.source_uuid,
        resource_class=rp_class, max_unit=500)

    # Two allocations: one held by the instance consumer, one held by
    # the migration consumer.
    alloc_obj.replace_all(self.ctx, [
        _alloc(target_rp, inst_consumer, 200),
        _alloc(source_rp, mig_consumer, 200),
    ])

    # Check primary consumer allocations.
    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(1, len(allocations))
    self.assertEqual(200, allocations[0].used)

    # Check migration allocations.
    allocations = alloc_obj.get_all_by_consumer_id(
        self.ctx, migration_uuid)
    self.assertEqual(1, len(allocations))
    self.assertEqual(200, allocations[0].used)

    # Clear the migration (used=0) and confirm the target allocation
    # is untouched while the migration's is gone.
    alloc_obj.replace_all(self.ctx, [
        _alloc(target_rp, inst_consumer, 200),
        _alloc(source_rp, mig_consumer, 0),
    ])

    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(1, len(allocations))
    self.assertEqual(200, allocations[0].used)

    allocations = alloc_obj.get_all_by_consumer_id(
        self.ctx, migration_uuid)
    self.assertEqual(0, len(allocations))
def test_create_exceeding_capacity_allocation(self):
    """Test a list of allocations containing one invalid allocation that
    exceeds a resource provider's capacity.

    Expect InvalidAllocationCapacityExceeded to be raised and none of
    the allocations in the list to be applied.
    """
    empty_rp = self._create_provider('empty_rp')
    full_rp = self._create_provider('full_rp')

    # Identical inventory on both providers.
    for rp in (empty_rp, full_rp):
        tb.add_inventory(rp, orc.VCPU, 24,
                         allocation_ratio=16.0)
        tb.add_inventory(rp, orc.MEMORY_MB, 1024,
                         min_unit=64, max_unit=1024, step_size=64)

    # A consumer representing the first instance.
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()

    # First, an allocation that consumes all of full_rp's memory.
    alloc_obj.replace_all(self.ctx, [
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=full_rp,
            resource_class=orc.VCPU, used=12),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=full_rp,
            resource_class=orc.MEMORY_MB, used=1024),
    ])

    # A consumer representing the second instance.
    inst2_consumer = consumer_obj.Consumer(
        self.ctx, uuid=uuidsentinel.instance2, user=self.user_obj,
        project=self.project_obj)
    inst2_consumer.create()

    # An allocation list mixing valid requests against empty_rp with
    # an invalid request that exceeds the memory full_rp can provide.
    alloc_list = [
        alloc_obj.Allocation(
            consumer=inst2_consumer, resource_provider=empty_rp,
            resource_class=orc.VCPU, used=12),
        alloc_obj.Allocation(
            consumer=inst2_consumer, resource_provider=empty_rp,
            resource_class=orc.MEMORY_MB, used=512),
        alloc_obj.Allocation(
            consumer=inst2_consumer, resource_provider=full_rp,
            resource_class=orc.VCPU, used=12),
        alloc_obj.Allocation(
            consumer=inst2_consumer, resource_provider=full_rp,
            resource_class=orc.MEMORY_MB, used=512),
    ]
    self.assertRaises(exception.InvalidAllocationCapacityExceeded,
                      alloc_obj.replace_all, self.ctx, alloc_list)

    # Neither empty_rp's nor full_rp's allocations should have changed.
    allocations = alloc_obj.get_all_by_resource_provider(
        self.ctx, full_rp)
    self.assertEqual(2, len(allocations))
    allocations = alloc_obj.get_all_by_resource_provider(
        self.ctx, empty_rp)
    self.assertEqual(0, len(allocations))
def test_allocation_list_create(self):
    """Walk an allocation write through missing-inventory and
    constraint failures to a successful write, usage checks and
    deletion.
    """
    max_unit = 10
    consumer_uuid = uuidsentinel.consumer

    # A consumer representing the instance.
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=consumer_uuid, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()

    # Two resource providers, each allocated a different class.
    rp1_name = uuidsentinel.rp1_name
    rp1_uuid = uuidsentinel.rp1_uuid
    rp1_class = orc.DISK_GB
    rp1_used = 6

    rp2_name = uuidsentinel.rp2_name
    rp2_uuid = uuidsentinel.rp2_uuid
    rp2_class = orc.IPV4_ADDRESS
    rp2_used = 2

    rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
    rp2 = self._create_provider(rp2_name, uuid=rp2_uuid)

    # Two allocations, one against each resource provider.
    allocation_list = [
        alloc_obj.Allocation(
            resource_provider=rp1, consumer=inst_consumer,
            resource_class=rp1_class, used=rp1_used),
        alloc_obj.Allocation(
            resource_provider=rp2, consumer=inst_consumer,
            resource_class=rp2_class, used=rp2_used),
    ]

    # There's no inventory yet, so the write must fail.
    error = self.assertRaises(exception.InvalidInventory,
                              alloc_obj.replace_all, self.ctx,
                              allocation_list)
    # The message should contain the resource class string (not its
    # index) and list resource providers by uuid.
    self.assertIn(rp1_class, str(error))
    self.assertIn(rp2_class, str(error))
    self.assertIn(rp1.uuid, str(error))
    self.assertIn(rp2.uuid, str(error))

    # Inventory on only one of the two providers is still a failure,
    # since rp2 has no inventory.
    tb.add_inventory(rp1, rp1_class, 1024, max_unit=1)
    self.assertRaises(exception.InvalidInventory,
                      alloc_obj.replace_all, self.ctx, allocation_list)

    # With inventory on the second provider the write still fails
    # because max_unit is 1.
    tb.add_inventory(rp2, rp2_class, 255, reserved=2, max_unit=1)
    self.assertRaises(exception.InvalidAllocationConstraintsViolated,
                      alloc_obj.replace_all, self.ctx, allocation_list)

    # Raise max_unit on both providers so the allocations fit.
    inv1 = inv_obj.Inventory(resource_provider=rp1,
                             resource_class=rp1_class,
                             total=1024, max_unit=max_unit)
    rp1.set_inventory([inv1])
    inv2 = inv_obj.Inventory(resource_provider=rp2,
                             resource_class=rp2_class,
                             total=255, reserved=2, max_unit=max_unit)
    rp2.set_inventory([inv2])

    # Now we can finally allocate.
    alloc_obj.replace_all(self.ctx, allocation_list)

    # Those allocations should show up as usage on each provider.
    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp2_uuid)
    self.assertEqual(rp1_used, rp1_usage[0].usage)
    self.assertEqual(rp2_used, rp2_usage[0].usage)

    # redo one allocation
    # TODO(cdent): This does not currently behave as expected
    # because a new allocation is created, adding to the total
    # used, not replacing.
    rp1_used += 1
    self.allocate_from_provider(
        rp1, rp1_class, rp1_used, consumer=inst_consumer)

    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    self.assertEqual(rp1_used, rp1_usage[0].usage)

    # Delete the allocations for the consumer; usage on both providers
    # drops to zero.
    # NOTE(cdent): The database uses 'consumer_id' for the
    # column, presumably because some ids might not be uuids, at
    # some point in the future.
    consumer_allocations = alloc_obj.get_all_by_consumer_id(
        self.ctx, consumer_uuid)
    alloc_obj.delete_all(self.ctx, consumer_allocations)

    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp2_uuid)
    self.assertEqual(0, rp1_usage[0].usage)
    self.assertEqual(0, rp2_usage[0].usage)
def test_delete_consumer_if_no_allocs(self):
    """alloc_obj.replace_all() should attempt to delete consumers that
    no longer have any allocations. Due to the REST API not having
    any way to query for consumers directly (only via the
    GET /allocations/{consumer_uuid} endpoint which returns an empty
    dict even when no consumer record exists for the {consumer_uuid})
    we need to do this functional test using only the object layer.
    """
    # Two consumers; only c2 will have all of its allocations removed
    # in a single transaction (and we expect that consumer record to
    # be deleted as a result).
    c1 = consumer_obj.Consumer(
        self.ctx, uuid=uuids.consumer1, user=self.user_obj,
        project=self.project_obj)
    c1.create()
    c2 = consumer_obj.Consumer(
        self.ctx, uuid=uuids.consumer2, user=self.user_obj,
        project=self.project_obj)
    c2.create()

    # Some inventory for the consumers to allocate from.
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, orc.VCPU, 8)
    tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
    tb.add_inventory(cn1, orc.DISK_GB, 2000)

    # Allocate some of that inventory to the two consumers.
    allocs = []
    for consumer in (c1, c2):
        allocs.extend([
            alloc_obj.Allocation(
                consumer=consumer, resource_provider=cn1,
                resource_class=orc.VCPU, used=1),
            alloc_obj.Allocation(
                consumer=consumer, resource_provider=cn1,
                resource_class=orc.MEMORY_MB, used=512),
        ])
    alloc_obj.replace_all(self.ctx, allocs)

    # Both consumer records must exist at this point.
    for c_uuid in (uuids.consumer1, uuids.consumer2):
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
        self.assertIsNotNone(c_obj)

    # Now "remove" consumer2's allocations by setting the used value
    # of both allocated resources to 0 and re-running replace_all().
    # This should end up deleting the consumer record for consumer2.
    allocs = [
        alloc_obj.Allocation(
            consumer=c2, resource_provider=cn1,
            resource_class=orc.VCPU, used=0),
        alloc_obj.Allocation(
            consumer=c2, resource_provider=cn1,
            resource_class=orc.MEMORY_MB, used=0),
    ]
    alloc_obj.replace_all(self.ctx, allocs)

    # consumer1 should still exist...
    c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
    self.assertIsNotNone(c_obj)

    # but not consumer2...
    self.assertRaises(
        exception.NotFound, consumer_obj.Consumer.get_by_uuid,
        self.ctx, uuids.consumer2)

    # DELETE /allocations/{consumer_uuid} is the other place where we
    # delete all allocations for a consumer. Delete all for consumer1
    # and check that its consumer record is deleted too.
    alloc_list = alloc_obj.get_all_by_consumer_id(
        self.ctx, uuids.consumer1)
    alloc_obj.delete_all(self.ctx, alloc_list)

    # consumer1 should no longer exist in the DB since we just deleted
    # all of its allocations.
    self.assertRaises(
        exception.NotFound, consumer_obj.Consumer.get_by_uuid,
        self.ctx, uuids.consumer1)
def test_multi_provider_allocation(self):
    """Tests that an allocation that includes more than one resource
    provider can be created, listed and deleted properly.

    Bug #1707669 highlighted a situation that arose when attempting to
    remove part of an allocation for a source host during a resize
    operation where the existing allocation was not being properly
    deleted.
    """
    cn_source = self._create_provider('cn_source')
    cn_dest = self._create_provider('cn_dest')

    # Identical inventory on both the source and destination host.
    for cn in (cn_source, cn_dest):
        tb.add_inventory(cn, orc.VCPU, 24,
                         allocation_ratio=16.0)
        tb.add_inventory(cn, orc.MEMORY_MB, 1024,
                         min_unit=64, max_unit=1024, step_size=64,
                         allocation_ratio=1.5)

    # A consumer representing the instance being moved.
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()

    # Emulate a move operation: the scheduler has selected cn_dest as
    # the target host and created a "doubled-up" allocation spanning
    # both hosts for the duration of the move.
    alloc_list = [
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_source,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_source,
            resource_class=orc.MEMORY_MB, used=256),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.MEMORY_MB, used=256),
    ]
    alloc_obj.replace_all(self.ctx, alloc_list)

    src_allocs = alloc_obj.get_all_by_resource_provider(
        self.ctx, cn_source)
    self.assertEqual(2, len(src_allocs))
    dest_allocs = alloc_obj.get_all_by_resource_provider(
        self.ctx, cn_dest)
    self.assertEqual(2, len(dest_allocs))
    consumer_allocs = alloc_obj.get_all_by_consumer_id(
        self.ctx, uuidsentinel.instance)
    self.assertEqual(4, len(consumer_allocs))

    # Validate that writing a new allocation for the consumer deletes
    # any existing allocation and replaces it with the new one. Here
    # we're emulating the step that occurs on confirm_resize(), where
    # the source host pulls the existing allocation for the instance,
    # removes any resources that refer to itself and saves the
    # allocation back to placement.
    new_alloc_list = [
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.MEMORY_MB, used=256),
    ]
    alloc_obj.replace_all(self.ctx, new_alloc_list)

    src_allocs = alloc_obj.get_all_by_resource_provider(
        self.ctx, cn_source)
    self.assertEqual(0, len(src_allocs))
    dest_allocs = alloc_obj.get_all_by_resource_provider(
        self.ctx, cn_dest)
    self.assertEqual(2, len(dest_allocs))
    consumer_allocs = alloc_obj.get_all_by_consumer_id(
        self.ctx, uuidsentinel.instance)
    self.assertEqual(2, len(consumer_allocs))
def test_set_allocations_retry(self, mock_log):
    """Test server side allocation write retry handling.

    :param mock_log: the patched module logger, injected by the
        mock.patch decorator on this method.
    """
    # Create a single resource provider and give it some inventory.
    rp1 = self._create_provider('rp1')
    tb.add_inventory(rp1, orc.VCPU, 24,
                     allocation_ratio=16.0)
    tb.add_inventory(rp1, orc.MEMORY_MB, 1024,
                     min_unit=64, max_unit=1024, step_size=64)
    original_generation = rp1.generation
    # Verify the generation is what we expect (we'll be checking again
    # later).
    self.assertEqual(2, original_generation)

    # Create a consumer and have it make an allocation.
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()

    alloc_list = [
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=rp1,
            resource_class=orc.VCPU, used=12),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=rp1,
            resource_class=orc.MEMORY_MB, used=1024)
    ]

    # Make sure the right exception happens when the retry loop expires.
    with mock.patch.object(alloc_obj, 'RP_CONFLICT_RETRY_COUNT', 0):
        self.assertRaises(
            exception.ResourceProviderConcurrentUpdateDetected,
            alloc_obj.replace_all, self.ctx, alloc_list)
        mock_log.warning.assert_called_with(
            'Exceeded retry limit of %d on allocations write', 0)

    # Make sure the right thing happens after a small number of failures.
    # There's a bit of mock magic going on here to ensure that we can
    # both do some side effects on _set_allocations as well as have the
    # real behavior. Two generation conflicts and then a success.
    mock_log.reset_mock()
    with mock.patch.object(alloc_obj, 'RP_CONFLICT_RETRY_COUNT', 3):
        unmocked_set = alloc_obj._set_allocations
        with mock.patch('placement.objects.allocation.'
                        '_set_allocations') as mock_set:
            exceptions = iter([
                exception.ResourceProviderConcurrentUpdateDetected(),
                exception.ResourceProviderConcurrentUpdateDetected(),
            ])

            def side_effect(*args, **kwargs):
                # Raise a conflict for the first two calls, then fall
                # through to the real implementation.
                try:
                    raise next(exceptions)
                except StopIteration:
                    return unmocked_set(*args, **kwargs)

            mock_set.side_effect = side_effect
            alloc_obj.replace_all(self.ctx, alloc_list)
            self.assertEqual(2, mock_log.debug.call_count)
            # BUG FIX: the original used mock_log.debug.called_with(),
            # which is just an attribute access on a Mock and asserts
            # nothing. assert_called_with actually verifies the retry
            # debug message was logged.
            mock_log.debug.assert_called_with(
                'Retrying allocations write on resource provider '
                'generation conflict')
            self.assertEqual(3, mock_set.call_count)

    # Confirm we're using a different rp object after the change
    # and that it has a higher generation.
    new_rp = alloc_list[0].resource_provider
    self.assertEqual(original_generation, rp1.generation)
    self.assertEqual(original_generation + 1, new_rp.generation)
def test_allocation_checking(self):
    """Test that allocation check logic works with 2 resource classes on
    one provider.

    If this fails, we get a KeyError at replace_all().
    """
    max_unit = 10
    consumer_uuid = uuidsentinel.consumer
    consumer_uuid2 = uuidsentinel.consumer2

    # Two consumers, representing the two instances.
    consumer = consumer_obj.Consumer(
        self.ctx, uuid=consumer_uuid, user=self.user_obj,
        project=self.project_obj)
    consumer.create()
    consumer2 = consumer_obj.Consumer(
        self.ctx, uuid=consumer_uuid2, user=self.user_obj,
        project=self.project_obj)
    consumer2.create()

    # One resource provider carrying inventory of two classes.
    rp1_name = uuidsentinel.rp1_name
    rp1_uuid = uuidsentinel.rp1_uuid
    rp1_class = orc.DISK_GB
    rp1_used = 6

    rp2_class = orc.IPV4_ADDRESS
    rp2_used = 2

    rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
    tb.add_inventory(rp1, rp1_class, 1024, max_unit=max_unit)
    tb.add_inventory(rp1, rp2_class, 255, reserved=2,
                     max_unit=max_unit)

    # Write the allocations for the first consumer.
    alloc_obj.replace_all(self.ctx, [
        alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer,
            resource_class=rp1_class, used=rp1_used),
        alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer,
            resource_class=rp2_class, used=rp2_used),
    ])

    # Write the allocations for a second consumer: until we have
    # allocations for more than one consumer in the db, we won't
    # actually be doing real allocation math, which triggers the sql
    # monster. If we are joining wrong, this will be a KeyError.
    alloc_obj.replace_all(self.ctx, [
        alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer2,
            resource_class=rp1_class, used=rp1_used),
        alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer2,
            resource_class=rp2_class, used=rp2_used),
    ])