def test_find(self):
    """inventory.find() matches by resource class name within a list.

    Also verifies it returns None for a valid-but-absent class, raises
    ValueError for a non-string (integer id) class, and returns None for
    a string that is not a known resource class.
    """
    rp = resource_provider.ResourceProvider(
        self.context, uuid=uuids.rp_uuid)
    inv_list = [
        inventory.Inventory(
            resource_provider=rp,
            resource_class=orc.VCPU,
            total=24),
        inventory.Inventory(
            resource_provider=rp,
            resource_class=orc.MEMORY_MB,
            total=10240),
    ]
    found = inventory.find(inv_list, orc.MEMORY_MB)
    self.assertIsNotNone(found)
    self.assertEqual(10240, found.total)
    found = inventory.find(inv_list, orc.VCPU)
    self.assertIsNotNone(found)
    self.assertEqual(24, found.total)
    # DISK_GB is a real class but has no inventory in the list.
    found = inventory.find(inv_list, orc.DISK_GB)
    self.assertIsNone(found)
    # Try an integer resource class identifier...
    self.assertRaises(ValueError, inventory.find, inv_list, VCPU_ID)
    # Use an invalid string...
    self.assertIsNone(inventory.find(inv_list, 'HOUSE'))
def test_destroy_fail_with_inventory(self):
    """Test that we raise an exception when attempting to delete a
    resource class that is referenced in an inventory record.
    """
    rc = rc_obj.ResourceClass(
        self.ctx,
        name='CUSTOM_IRON_NFV',
    )
    rc.create()
    rp = rp_obj.ResourceProvider(
        self.ctx,
        name='my rp',
        uuid=uuidsentinel.rp,
    )
    rp.create()
    inv = inv_obj.Inventory(
        resource_provider=rp,
        resource_class='CUSTOM_IRON_NFV',
        total=1,
    )
    rp.set_inventory([inv])

    # While the inventory record references the class, destroy must fail.
    self.assertRaises(exception.ResourceClassInUse, rc.destroy)

    # After the referencing inventory is removed, destroy succeeds.
    rp.set_inventory([])
    rc.destroy()
    rc_list = rc_obj.get_all(self.ctx)
    # Use a list rather than a one-shot generator: a failed assertNotIn
    # can then display the full membership, and the sequence remains
    # usable if further assertions are added.
    rc_ids = [r.id for r in rc_list]
    self.assertNotIn(rc.id, rc_ids)
def test_update_inventory_not_found(self):
    """Updating inventory for a class the provider has no record of
    raises NotFound with a message naming the missing class.
    """
    rp = self._create_provider(uuidsentinel.rp_name)
    # Provider was created without any DISK_GB inventory.
    disk_inv = inv_obj.Inventory(resource_provider=rp,
                                 resource_class='DISK_GB',
                                 total=2048)
    error = self.assertRaises(exception.NotFound,
                              rp.update_inventory, disk_inv)
    self.assertIn('No inventory of class DISK_GB found', str(error))
def add_inventory(rp, rc, total, **kwargs):
    """Create inventory of class ``rc`` on provider ``rp`` and return it.

    Ensures the resource class exists first. Extra keyword arguments are
    forwarded to the Inventory constructor; ``max_unit`` defaults to
    ``total`` when the caller does not supply one.
    """
    ctx = rp._context
    ensure_rc(ctx, rc)
    if 'max_unit' not in kwargs:
        kwargs['max_unit'] = total
    fields = dict(resource_provider=rp, resource_class=rc, total=total)
    fields.update(kwargs)
    inv = inv_obj.Inventory(ctx, **fields)
    rp.add_inventory(inv)
    return inv
def _make_allocation(self, inv_dict, alloc_dict):
    """Create a provider with one inventory and one allocation against it.

    :param inv_dict: kwargs for the Inventory constructor (minus provider)
    :param alloc_dict: kwargs for the Allocation constructor; must include
        a 'consumer_id' key which is popped and resolved to a consumer
    :returns: (resource_provider, allocation) tuple
    """
    # Shallow-copy so popping 'consumer_id' doesn't mutate the caller's
    # (often module-level, shared) dict.
    alloc_dict = copy.copy(alloc_dict)
    rp = self._create_provider('allocation_resource_provider')
    disk_inv = inv_obj.Inventory(resource_provider=rp, **inv_dict)
    rp.set_inventory([disk_inv])
    consumer_id = alloc_dict.pop('consumer_id')
    consumer = ensure_consumer(
        self.ctx, self.user_obj, self.project_obj, consumer_id)
    alloc = alloc_obj.Allocation(
        resource_provider=rp, consumer=consumer, **alloc_dict)
    alloc_obj.replace_all(self.ctx, [alloc])
    return rp, alloc
def test_get_all_one_allocation(self):
    """Usage reflects the existing allocation even after the inventory
    record is replaced.
    """
    db_rp, _ = self._make_allocation(tb.DISK_INVENTORY,
                                     tb.DISK_ALLOCATION)
    # Replace the provider's inventory; the allocation should survive.
    inv = inv_obj.Inventory(resource_provider=db_rp,
                            resource_class=orc.DISK_GB,
                            total=1024)
    db_rp.set_inventory([inv])
    usages = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, db_rp.uuid)
    self.assertEqual(1, len(usages))
    # The expected usage of 2 presumably comes from tb.DISK_ALLOCATION's
    # 'used' value — confirm against the fixture if it changes.
    self.assertEqual(2, usages[0].usage)
    self.assertEqual(orc.DISK_GB, usages[0].resource_class)
def test_set_defaults(self):
    """A freshly constructed Inventory carries the expected field
    defaults when only provider, class and total are supplied.
    """
    rp = resource_provider.ResourceProvider(
        self.context,
        id=_RESOURCE_PROVIDER_ID,
        uuid=_RESOURCE_PROVIDER_UUID)
    inv = inventory.Inventory(
        self.context,
        resource_provider=rp,
        resource_class=_RESOURCE_CLASS_NAME,
        total=16)
    # (field, expected default) pairs, checked in declaration order.
    expected_defaults = (
        ('reserved', 0),
        ('min_unit', 1),
        ('max_unit', 1),
        ('step_size', 1),
        ('allocation_ratio', 1.0),
    )
    for field_name, expected in expected_defaults:
        self.assertEqual(expected, getattr(inv, field_name))
def test_capacity(self):
    """capacity tracks the unreserved portion of total, scaled by the
    allocation ratio (as demonstrated by the three checks below).
    """
    rp = resource_provider.ResourceProvider(
        self.context,
        id=_RESOURCE_PROVIDER_ID,
        uuid=_RESOURCE_PROVIDER_UUID)
    inv = inventory.Inventory(
        self.context,
        resource_provider=rp,
        resource_class=_RESOURCE_CLASS_NAME,
        total=16,
        reserved=16)
    # Everything reserved: no usable capacity.
    self.assertEqual(0, inv.capacity)
    # Free up one unit of the total.
    inv.reserved = 15
    self.assertEqual(1, inv.capacity)
    # Doubling the allocation ratio doubles the usable capacity.
    inv.allocation_ratio = 2.0
    self.assertEqual(2, inv.capacity)
def test_set_inventory_unknown_resource_class(self):
    """Test attempting to set inventory to an unknown resource class
    raises an exception.
    """
    rp = self._create_provider('compute-host')
    # 'UNKNOWN' is neither a standard class nor a registered custom one.
    inv = inv_obj.Inventory(
        rp._context, resource_provider=rp,
        resource_class='UNKNOWN',
        total=1024,
        reserved=15,
        min_unit=10,
        max_unit=100,
        step_size=10,
        allocation_ratio=1.0)
    self.assertRaises(
        exception.ResourceClassNotFound, rp.add_inventory, inv)
def make_inventory_object(resource_provider, resource_class, **data): """Single place to catch malformed Inventories.""" # TODO(cdent): Some of the validation checks that are done here # could be done via JSONschema (using, for example, "minimum": # 0) for non-negative integers. It's not clear if that is # duplication or decoupling so leaving it as this for now. try: inventory = inv_obj.Inventory( resource_provider=resource_provider, resource_class=resource_class, **data) except (ValueError, TypeError) as exc: raise webob.exc.HTTPBadRequest( _('Bad inventory %(class)s for resource provider ' '%(rp_uuid)s: %(error)s') % {'class': resource_class, 'rp_uuid': resource_provider.uuid, 'error': exc}) return inventory
def test_update_inventory_violates_allocation(self, mock_log):
    # Compute nodes that are reconfigured have to be able to set
    # their inventory to something that violates allocations so
    # we need to make that possible.
    rp, allocation = self._make_allocation(tb.DISK_INVENTORY,
                                           tb.DISK_ALLOCATION)
    # attempt to set inventory to less than currently allocated
    # amounts
    new_total = 1
    disk_inv = inv_obj.Inventory(
        resource_provider=rp,
        resource_class=orc.DISK_GB, total=new_total)
    rp.update_inventory(disk_inv)
    # The pre-existing allocation survives even though it now exceeds
    # the shrunken inventory.
    usages = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp.uuid)
    self.assertEqual(allocation.used, usages[0].usage)
    inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
    self.assertEqual(new_total, inv_list[0].total)
    # The violation is reported via a warning log, not an exception.
    mock_log.warning.assert_called_once_with(
        mock.ANY, {'uuid': rp.uuid, 'resource': 'DISK_GB'})
def test_set_inventory_fail_in_use(self):
    """Test attempting to set inventory which would result in removing
    an inventory record for a resource class that still has allocations
    against it.
    """
    rp = self._create_provider('compute-host')
    tb.add_inventory(rp, 'VCPU', 12)
    self.allocate_from_provider(rp, 'VCPU', 1)
    # The replacement inventory list omits VCPU entirely; since VCPU
    # still has an allocation, set_inventory must refuse.
    inv = inv_obj.Inventory(
        resource_provider=rp,
        resource_class='MEMORY_MB',
        total=1024,
        reserved=0,
        min_unit=256,
        max_unit=1024,
        step_size=256,
        allocation_ratio=1.0,
    )
    self.assertRaises(exception.InventoryInUse,
                      rp.set_inventory,
                      [inv])
def add_inventory(rp, rc, total, **kwargs):
    """Create inventory of class ``rc`` on provider ``rp`` and return it.

    Extra keyword arguments are forwarded to the Inventory constructor;
    ``max_unit`` defaults to ``total`` unless the caller overrides it.
    """
    fields = {'max_unit': total}
    # Caller-supplied kwargs win over the max_unit default.
    fields.update(kwargs)
    inv = inv_obj.Inventory(rp, resource_class=rc, total=total, **fields)
    rp.add_inventory(inv)
    return inv
def test_allocation_list_create(self):
    """End-to-end exercise of alloc_obj.replace_all over two providers:
    failure with no inventory, failure with partial inventory, failure on
    max_unit constraint violation, success, re-allocation, and deletion.
    """
    max_unit = 10
    consumer_uuid = uuidsentinel.consumer

    # Create a consumer representing the instance
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=consumer_uuid, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()

    # Create two resource providers
    rp1_name = uuidsentinel.rp1_name
    rp1_uuid = uuidsentinel.rp1_uuid
    rp1_class = orc.DISK_GB
    rp1_used = 6

    rp2_name = uuidsentinel.rp2_name
    rp2_uuid = uuidsentinel.rp2_uuid
    rp2_class = orc.IPV4_ADDRESS
    rp2_used = 2

    rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
    rp2 = self._create_provider(rp2_name, uuid=rp2_uuid)

    # Two allocations, one for each resource provider.
    allocation_1 = alloc_obj.Allocation(
        resource_provider=rp1, consumer=inst_consumer,
        resource_class=rp1_class, used=rp1_used)
    allocation_2 = alloc_obj.Allocation(
        resource_provider=rp2, consumer=inst_consumer,
        resource_class=rp2_class, used=rp2_used)
    allocation_list = [allocation_1, allocation_2]

    # There's no inventory, we have a failure.
    error = self.assertRaises(exception.InvalidInventory,
                              alloc_obj.replace_all, self.ctx,
                              allocation_list)
    # Confirm that the resource class string, not index, is in
    # the exception and resource providers are listed by uuid.
    self.assertIn(rp1_class, str(error))
    self.assertIn(rp2_class, str(error))
    self.assertIn(rp1.uuid, str(error))
    self.assertIn(rp2.uuid, str(error))

    # Add inventory for one of the two resource providers. This should
    # also fail, since rp2 has no inventory.
    tb.add_inventory(rp1, rp1_class, 1024, max_unit=1)
    self.assertRaises(exception.InvalidInventory,
                      alloc_obj.replace_all, self.ctx, allocation_list)

    # Add inventory for the second resource provider
    tb.add_inventory(rp2, rp2_class, 255, reserved=2, max_unit=1)

    # Now the allocations will still fail because max_unit 1
    self.assertRaises(exception.InvalidAllocationConstraintsViolated,
                      alloc_obj.replace_all, self.ctx, allocation_list)
    # Raise max_unit to something the requested amounts fit under.
    inv1 = inv_obj.Inventory(resource_provider=rp1,
                             resource_class=rp1_class,
                             total=1024, max_unit=max_unit)
    rp1.set_inventory([inv1])
    inv2 = inv_obj.Inventory(resource_provider=rp2,
                             resource_class=rp2_class,
                             total=255, reserved=2, max_unit=max_unit)
    rp2.set_inventory([inv2])

    # Now we can finally allocate.
    alloc_obj.replace_all(self.ctx, allocation_list)

    # Check that those allocations changed usage on each
    # resource provider.
    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp2_uuid)
    self.assertEqual(rp1_used, rp1_usage[0].usage)
    self.assertEqual(rp2_used, rp2_usage[0].usage)

    # redo one allocation
    # TODO(cdent): This does not currently behave as expected
    # because a new allocation is created, adding to the total
    # used, not replacing.
    rp1_used += 1
    self.allocate_from_provider(
        rp1, rp1_class, rp1_used, consumer=inst_consumer)

    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    self.assertEqual(rp1_used, rp1_usage[0].usage)

    # delete the allocations for the consumer
    # NOTE(cdent): The database uses 'consumer_id' for the
    # column, presumably because some ids might not be uuids, at
    # some point in the future.
    consumer_allocations = alloc_obj.get_all_by_consumer_id(
        self.ctx, consumer_uuid)
    alloc_obj.delete_all(self.ctx, consumer_allocations)

    # After deletion the usage records report zero on both providers.
    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp2_uuid)
    self.assertEqual(0, rp1_usage[0].usage)
    self.assertEqual(0, rp2_usage[0].usage)
def test_reshape(self):
    """We set up the following scenario:

    BEFORE: single compute node setup

      A single compute node with:
        - VCPU, MEMORY_MB, DISK_GB inventory
        - Two instances consuming CPU, RAM and DISK from that compute node

    AFTER: hierarchical + shared storage setup

      A compute node parent provider with:
        - MEMORY_MB
      Two NUMA node child providers containing:
        - VCPU
      Shared storage provider with:
        - DISK_GB
      Both instances have their resources split among the providers and
      shared storage accordingly
    """
    # First create our consumers
    i1_uuid = uuids.instance1
    i1_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i1_uuid, user=self.user_obj,
        project=self.project_obj)
    i1_consumer.create()

    i2_uuid = uuids.instance2
    i2_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i2_uuid, user=self.user_obj,
        project=self.project_obj)
    i2_consumer.create()

    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, 'VCPU', 16)
    tb.add_inventory(cn1, 'MEMORY_MB', 32768)
    tb.add_inventory(cn1, 'DISK_GB', 1000)

    # Allocate both instances against the single compute node
    for consumer in (i1_consumer, i2_consumer):
        allocs = [
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='VCPU',
                consumer=consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='DISK_GB',
                consumer=consumer, used=100),
        ]
        alloc_obj.replace_all(self.ctx, allocs)

    # Verify we have the allocations we expect for the BEFORE scenario
    before_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
    self.assertEqual(3, len(before_allocs_i1))
    self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
    before_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
    self.assertEqual(3, len(before_allocs_i2))
    self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)

    # Before we issue the actual reshape() call, we need to first create
    # the child providers and sharing storage provider. These are actions
    # that the virt driver or external agent is responsible for performing
    # *before* attempting any reshape activity.
    cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
    cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
    ss = self._create_provider('ss')

    # OK, now emulate the call to POST /reshaper that will be triggered
    # by a virt driver wanting to replace the world and change its
    # modeling from a single provider to a nested provider tree along
    # with a sharing storage provider.
    after_inventories = {
        # cn1 keeps the RAM only
        cn1: [
            inv_obj.Inventory(
                resource_provider=cn1,
                resource_class='MEMORY_MB', total=32768, reserved=0,
                max_unit=32768, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        # each NUMA node gets half of the CPUs
        cn1_numa0: [
            inv_obj.Inventory(
                resource_provider=cn1_numa0,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        cn1_numa1: [
            inv_obj.Inventory(
                resource_provider=cn1_numa1,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        # The sharing provider gets a bunch of disk
        ss: [
            inv_obj.Inventory(
                resource_provider=ss,
                resource_class='DISK_GB', total=100000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
    }

    # We do a fetch from the DB for each instance to get its latest
    # generation. This would be done by the resource tracker or scheduler
    # report client before issuing the call to reshape() because the
    # consumers representing the two instances above will have had their
    # generations incremented in the original call to PUT
    # /allocations/{consumer_uuid}
    i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
    i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)

    after_allocs = [
        # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        alloc_obj.Allocation(
            resource_provider=cn1_numa0, resource_class='VCPU',
            consumer=i1_consumer, used=2),
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i1_consumer, used=1024),
        alloc_obj.Allocation(
            resource_provider=ss, resource_class='DISK_GB',
            consumer=i1_consumer, used=100),
        # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        alloc_obj.Allocation(
            resource_provider=cn1_numa1, resource_class='VCPU',
            consumer=i2_consumer, used=2),
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i2_consumer, used=1024),
        alloc_obj.Allocation(
            resource_provider=ss, resource_class='DISK_GB',
            consumer=i2_consumer, used=100),
    ]
    reshaper.reshape(self.ctx, after_inventories, after_allocs)

    # Verify that the inventories have been moved to the appropriate
    # providers in the AFTER scenario

    # The root compute node should only have MEMORY_MB, nothing else
    cn1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1)
    self.assertEqual(1, len(cn1_inv))
    self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
    self.assertEqual(32768, cn1_inv[0].total)
    # Each NUMA node should only have half the original VCPU, nothing
    # else
    numa0_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa0)
    self.assertEqual(1, len(numa0_inv))
    self.assertEqual('VCPU', numa0_inv[0].resource_class)
    self.assertEqual(8, numa0_inv[0].total)
    numa1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa1)
    self.assertEqual(1, len(numa1_inv))
    self.assertEqual('VCPU', numa1_inv[0].resource_class)
    self.assertEqual(8, numa1_inv[0].total)
    # The sharing storage provider should only have DISK_GB, nothing else
    ss_inv = inv_obj.get_all_by_resource_provider(self.ctx, ss)
    self.assertEqual(1, len(ss_inv))
    self.assertEqual('DISK_GB', ss_inv[0].resource_class)
    self.assertEqual(100000, ss_inv[0].total)

    # Verify we have the allocations we expect for the AFTER scenario
    after_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
    self.assertEqual(3, len(after_allocs_i1))
    # Our VCPU allocation should be in the NUMA0 node
    vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

    after_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
    self.assertEqual(3, len(after_allocs_i2))
    # Our VCPU allocation should be in the NUMA1 node
    vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
def test_reshape_concurrent_inventory_update(self):
    """Valid failure scenario for reshape(). We test a situation where
    the virt driver has constructed it's "after inventories and
    allocations" and sent those to the POST /reshape endpoint. The
    reshape POST handler does a quick check of the resource provider
    generations sent in the payload and they all check out.

    However, right before the call to resource_provider.reshape(),
    another thread legitimately changes the inventory of one of the
    providers involved in the reshape transaction. We should get a
    ConcurrentUpdateDetected in this case.
    """
    # First create our consumers
    i1_uuid = uuids.instance1
    i1_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i1_uuid, user=self.user_obj,
        project=self.project_obj)
    i1_consumer.create()

    # then all our original providers
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, 'VCPU', 16)
    tb.add_inventory(cn1, 'MEMORY_MB', 32768)
    tb.add_inventory(cn1, 'DISK_GB', 1000)

    # Allocate an instance on our compute node
    allocs = [
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='VCPU',
            consumer=i1_consumer, used=2),
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i1_consumer, used=1024),
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='DISK_GB',
            consumer=i1_consumer, used=100),
    ]
    alloc_obj.replace_all(self.ctx, allocs)

    # Before we issue the actual reshape() call, we need to first create
    # the child providers and sharing storage provider. These are actions
    # that the virt driver or external agent is responsible for performing
    # *before* attempting any reshape activity.
    cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
    cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
    ss = self._create_provider('ss')

    # OK, now emulate the call to POST /reshaper that will be triggered
    # by a virt driver wanting to replace the world and change its
    # modeling from a single provider to a nested provider tree along
    # with a sharing storage provider.
    after_inventories = {
        # cn1 keeps the RAM only
        cn1: [
            inv_obj.Inventory(
                resource_provider=cn1,
                resource_class='MEMORY_MB', total=32768, reserved=0,
                max_unit=32768, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        # each NUMA node gets half of the CPUs
        cn1_numa0: [
            inv_obj.Inventory(
                resource_provider=cn1_numa0,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        cn1_numa1: [
            inv_obj.Inventory(
                resource_provider=cn1_numa1,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        # The sharing provider gets a bunch of disk
        ss: [
            inv_obj.Inventory(
                resource_provider=ss,
                resource_class='DISK_GB', total=100000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
    }

    # We do a fetch from the DB for each instance to get its latest
    # generation. This would be done by the resource tracker or scheduler
    # report client before issuing the call to reshape() because the
    # consumers representing the two instances above will have had their
    # generations incremented in the original call to PUT
    # /allocations/{consumer_uuid}
    i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)

    after_allocs = [
        # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        alloc_obj.Allocation(
            resource_provider=cn1_numa0, resource_class='VCPU',
            consumer=i1_consumer, used=2),
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i1_consumer, used=1024),
        alloc_obj.Allocation(
            resource_provider=ss, resource_class='DISK_GB',
            consumer=i1_consumer, used=100),
    ]

    # OK, now before we call reshape(), here we emulate another thread
    # changing the inventory for the sharing storage provider in between
    # the time in the REST handler when the sharing storage provider's
    # generation was validated and the actual call to reshape()
    ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
    # Reduce the amount of storage to 2000, from 100000.
    new_ss_inv = [
        inv_obj.Inventory(
            resource_provider=ss_threadB,
            resource_class='DISK_GB', total=2000, reserved=0,
            max_unit=1000, min_unit=1, step_size=1,
            allocation_ratio=1.0)]
    ss_threadB.set_inventory(new_ss_inv)
    # Double check our storage provider's generation is now greater than
    # the original storage provider record being sent to reshape()
    self.assertGreater(ss_threadB.generation, ss.generation)

    # And we should legitimately get a failure now to reshape() due to
    # another thread updating one of the involved provider's generations
    self.assertRaises(
        exception.ConcurrentUpdateDetected,
        reshaper.reshape, self.ctx, after_inventories, after_allocs)
def test_provider_modify_inventory(self):
    """Exercise the full add/replace/update/delete inventory lifecycle
    on one provider, checking that the provider generation bumps exactly
    once per successful change and not at all on failures.
    """
    rp = self._create_provider(uuidsentinel.rp_name)
    saved_generation = rp.generation

    disk_inv = tb.add_inventory(rp, orc.DISK_GB, 1024,
                                reserved=15,
                                min_unit=10,
                                max_unit=100,
                                step_size=10)

    vcpu_inv = tb.add_inventory(rp, orc.VCPU, 12,
                                allocation_ratio=16.0)

    # generation has bumped once for each add
    self.assertEqual(saved_generation + 2, rp.generation)
    saved_generation = rp.generation

    new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
    self.assertEqual(2, len(new_inv_list))
    resource_classes = [inv.resource_class for inv in new_inv_list]
    self.assertIn(orc.VCPU, resource_classes)
    self.assertIn(orc.DISK_GB, resource_classes)

    # reset inventory to just disk_inv
    rp.set_inventory([disk_inv])

    # generation has bumped
    self.assertEqual(saved_generation + 1, rp.generation)
    saved_generation = rp.generation

    new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
    self.assertEqual(1, len(new_inv_list))
    resource_classes = [inv.resource_class for inv in new_inv_list]
    self.assertNotIn(orc.VCPU, resource_classes)
    self.assertIn(orc.DISK_GB, resource_classes)
    self.assertEqual(1024, new_inv_list[0].total)

    # update existing disk inv to new settings
    disk_inv = inv_obj.Inventory(
        resource_provider=rp,
        resource_class=orc.DISK_GB,
        total=2048,
        reserved=15,
        min_unit=10,
        max_unit=100,
        step_size=10,
        allocation_ratio=1.0)
    rp.update_inventory(disk_inv)

    # generation has bumped
    self.assertEqual(saved_generation + 1, rp.generation)
    saved_generation = rp.generation

    new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
    self.assertEqual(1, len(new_inv_list))
    self.assertEqual(2048, new_inv_list[0].total)

    # delete inventory
    rp.delete_inventory(orc.DISK_GB)

    # generation has bumped
    self.assertEqual(saved_generation + 1, rp.generation)
    saved_generation = rp.generation

    new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
    result = inv_obj.find(new_inv_list, orc.DISK_GB)
    self.assertIsNone(result)
    # Deleting a class that has no inventory raises NotFound.
    self.assertRaises(exception.NotFound, rp.delete_inventory,
                      orc.DISK_GB)

    # check inventory list is empty
    inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
    self.assertEqual(0, len(inv_list))

    # add some inventory
    rp.add_inventory(vcpu_inv)
    inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
    self.assertEqual(1, len(inv_list))

    # generation has bumped
    self.assertEqual(saved_generation + 1, rp.generation)
    saved_generation = rp.generation

    # add same inventory again
    self.assertRaises(db_exc.DBDuplicateEntry,
                      rp.add_inventory, vcpu_inv)

    # generation has not bumped
    self.assertEqual(saved_generation, rp.generation)

    # fail when generation wrong
    rp.generation = rp.generation - 1
    self.assertRaises(exception.ConcurrentUpdateDetected,
                      rp.set_inventory, inv_list)