def test_find(self):
    rp = resource_provider.ResourceProvider(uuid=uuids.rp_uuid)
    inv_list = resource_provider.InventoryList(objects=[
        resource_provider.Inventory(
            resource_provider=rp,
            resource_class=fields.ResourceClass.VCPU,
            total=24),
        resource_provider.Inventory(
            resource_provider=rp,
            resource_class=fields.ResourceClass.MEMORY_MB,
            total=10240),
    ])

    found = inv_list.find(fields.ResourceClass.MEMORY_MB)
    self.assertIsNotNone(found)
    self.assertEqual(10240, found.total)

    found = inv_list.find(fields.ResourceClass.VCPU)
    self.assertIsNotNone(found)
    self.assertEqual(24, found.total)

    found = inv_list.find(fields.ResourceClass.DISK_GB)
    self.assertIsNone(found)

    # Try an integer resource class identifier...
    self.assertRaises(ValueError, inv_list.find, VCPU_ID)

    # Use an invalid string...
    self.assertIsNone(inv_list.find('HOUSE'))
def start_fixture(self):
    super(NonSharedStorageFixture, self).start_fixture()
    self.context = context.get_admin_context()

    cn1_uuid = uuidutils.generate_uuid()
    cn2_uuid = uuidutils.generate_uuid()
    aggA_uuid = uuidutils.generate_uuid()
    aggB_uuid = uuidutils.generate_uuid()
    aggC_uuid = uuidutils.generate_uuid()
    os.environ['CN1_UUID'] = cn1_uuid
    os.environ['CN2_UUID'] = cn2_uuid
    os.environ['AGGA_UUID'] = aggA_uuid
    os.environ['AGGB_UUID'] = aggB_uuid
    os.environ['AGGC_UUID'] = aggC_uuid

    cn1 = rp_obj.ResourceProvider(self.context, name='cn1', uuid=cn1_uuid)
    cn1.create()

    cn2 = rp_obj.ResourceProvider(self.context, name='cn2', uuid=cn2_uuid)
    cn2.create()

    # Populate compute node inventory for VCPU, RAM and DISK
    for cn in (cn1, cn2):
        vcpu_inv = rp_obj.Inventory(
            self.context, resource_provider=cn,
            resource_class='VCPU', total=24, reserved=0,
            max_unit=24, min_unit=1, step_size=1,
            allocation_ratio=16.0)
        vcpu_inv.obj_set_defaults()
        ram_inv = rp_obj.Inventory(
            self.context, resource_provider=cn,
            resource_class='MEMORY_MB', total=128 * 1024,
            reserved=0, max_unit=128 * 1024, min_unit=256,
            step_size=256, allocation_ratio=1.5)
        ram_inv.obj_set_defaults()
        disk_inv = rp_obj.Inventory(
            self.context, resource_provider=cn,
            resource_class='DISK_GB', total=2000, reserved=100,
            max_unit=2000, min_unit=10, step_size=10,
            allocation_ratio=1.0)
        disk_inv.obj_set_defaults()
        inv_list = rp_obj.InventoryList(
            objects=[vcpu_inv, ram_inv, disk_inv])
        cn.set_inventory(inv_list)
def add_inventory(rp, rc, total, **kwargs):
    kwargs.setdefault('max_unit', total)
    inv = rp_obj.Inventory(rp._context, resource_provider=rp,
                           resource_class=rc, total=total, **kwargs)
    inv.obj_set_defaults()
    rp.add_inventory(inv)
    return inv
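# A minimal usage sketch for the add_inventory() helper above; the provider
# name and inventory values are illustrative, not from the original tests. It
# assumes the same context/rp_obj/uuidutils modules imported elsewhere in
# this section.
ctx = context.get_admin_context()
rp = rp_obj.ResourceProvider(ctx, name='example_rp',
                             uuid=uuidutils.generate_uuid())
rp.create()
add_inventory(rp, 'VCPU', 16)  # max_unit defaults to total (16)
# Any other Inventory field can be overridden via **kwargs:
add_inventory(rp, 'DISK_GB', 2000, reserved=100, min_unit=10, step_size=10)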
def test_create_with_id_fails(self):
    rp = resource_provider.ResourceProvider(context=self.context,
                                            uuid=_RESOURCE_PROVIDER_UUID,
                                            name=_RESOURCE_PROVIDER_NAME)
    rp.create()
    inv = resource_provider.Inventory(context=self.context,
                                      resource_provider=rp,
                                      resource_class=_RESOURCE_CLASS_NAME,
                                      total=16,
                                      reserved=2,
                                      min_unit=1,
                                      max_unit=8,
                                      step_size=1,
                                      allocation_ratio=1.0)
    inv_list = resource_provider.InventoryList(context=self.context,
                                               objects=[inv])
    rp.set_inventory(inv_list)
    obj = resource_provider.Allocation(context=self.context,
                                       id=99,
                                       resource_provider=rp,
                                       resource_class=_RESOURCE_CLASS_NAME,
                                       consumer_id=uuids.fake_instance,
                                       used=8)
    alloc_list = resource_provider.AllocationList(self.context,
                                                  objects=[obj])
    self.assertRaises(exception.ObjectActionError, alloc_list.create_all)
def test_create(self, mock_ensure_cache):
    rp = resource_provider.ResourceProvider(context=self.context,
                                            uuid=_RESOURCE_PROVIDER_UUID,
                                            name=_RESOURCE_PROVIDER_NAME)
    rp.create()
    inv = resource_provider.Inventory(context=self.context,
                                      resource_provider=rp,
                                      resource_class=_RESOURCE_CLASS_NAME,
                                      total=16,
                                      reserved=2,
                                      min_unit=1,
                                      max_unit=8,
                                      step_size=1,
                                      allocation_ratio=1.0)
    inv_list = resource_provider.InventoryList(context=self.context,
                                               objects=[inv])
    rp.set_inventory(inv_list)
    obj = resource_provider.Allocation(context=self.context,
                                       resource_provider=rp,
                                       resource_class=_RESOURCE_CLASS_NAME,
                                       consumer_id=uuids.fake_instance,
                                       used=8)
    alloc_list = resource_provider.AllocationList(self.context,
                                                  objects=[obj])
    alloc_list.create_all()

    rp_al = resource_provider.AllocationList
    saved_allocations = rp_al.get_all_by_resource_provider(
        self.context, rp)
    self.assertEqual(1, len(saved_allocations))
    self.assertEqual(obj.used, saved_allocations[0].used)
def _create_resource_provider(self, inventory):
    """Helper method to create a resource provider with inventory."""
    ctxt = context.get_admin_context()
    rp_uuid = uuidutils.generate_uuid()
    rp = rp_obj.ResourceProvider(
        context=ctxt, name=rp_uuid, uuid=rp_uuid)
    rp.create()
    inv = rp_obj.Inventory(
        context=ctxt, resource_provider=rp, **inventory)
    inv_list = rp_obj.InventoryList(objects=[inv])
    rp.set_inventory(inv_list)
    return rp
def _make_allocation(self, inv_dict, alloc_dict):
    rp = self._create_provider('allocation_resource_provider')
    disk_inv = rp_obj.Inventory(context=self.ctx,
                                resource_provider=rp, **inv_dict)
    inv_list = rp_obj.InventoryList(objects=[disk_inv])
    rp.set_inventory(inv_list)
    alloc = rp_obj.Allocation(self.ctx, resource_provider=rp,
                              **alloc_dict)
    alloc_list = rp_obj.AllocationList(self.ctx, objects=[alloc])
    alloc_list.create_all()
    return rp, alloc
def test_set_defaults(self):
    rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID,
                                            uuid=_RESOURCE_PROVIDER_UUID)
    kwargs = dict(resource_provider=rp,
                  resource_class=_RESOURCE_CLASS_NAME,
                  total=16)
    inv = resource_provider.Inventory(self.context, **kwargs)

    inv.obj_set_defaults()
    self.assertEqual(0, inv.reserved)
    self.assertEqual(1, inv.min_unit)
    self.assertEqual(1, inv.max_unit)
    self.assertEqual(1, inv.step_size)
    self.assertEqual(1.0, inv.allocation_ratio)
def _make_allocation(self, inv_dict, alloc_dict):
    rp = self._create_provider('allocation_resource_provider')
    disk_inv = rp_obj.Inventory(context=self.ctx,
                                resource_provider=rp, **inv_dict)
    inv_list = rp_obj.InventoryList(objects=[disk_inv])
    rp.set_inventory(inv_list)
    consumer_id = alloc_dict['consumer_id']
    consumer = ensure_consumer(
        self.ctx, self.user_obj, self.project_obj, consumer_id)
    alloc = rp_obj.Allocation(self.ctx, resource_provider=rp,
                              consumer=consumer, **alloc_dict)
    alloc_list = rp_obj.AllocationList(self.ctx, objects=[alloc])
    alloc_list.replace_all()
    return rp, alloc
def test_capacity(self):
    rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID,
                                            uuid=_RESOURCE_PROVIDER_UUID)
    kwargs = dict(resource_provider=rp,
                  resource_class=_RESOURCE_CLASS_NAME,
                  total=16,
                  reserved=16)
    inv = resource_provider.Inventory(self.context, **kwargs)
    inv.obj_set_defaults()

    self.assertEqual(0, inv.capacity)
    inv.reserved = 15
    self.assertEqual(1, inv.capacity)
    inv.allocation_ratio = 2.0
    self.assertEqual(2, inv.capacity)
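# The assertions above are consistent with capacity being derived as
# int((total - reserved) * allocation_ratio):
#   (16 - 16) * 1.0 -> 0
#   (16 - 15) * 1.0 -> 1
#   (16 - 15) * 2.0 -> 2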
def make_inventory_object(resource_provider, resource_class, **data):
    """Single place to catch malformed Inventories."""
    # TODO(cdent): Some of the validation checks that are done here
    # could be done via JSONschema (using, for example, "minimum": 0)
    # for non-negative integers. It's not clear whether that would be
    # duplication or decoupling, so it is left as is for now.
    try:
        inventory = rp_obj.Inventory(
            resource_provider=resource_provider,
            resource_class=resource_class, **data)
    except (ValueError, TypeError) as exc:
        raise webob.exc.HTTPBadRequest(
            _('Bad inventory %(class)s for resource provider '
              '%(rp_uuid)s: %(error)s') % {'class': resource_class,
                                           'rp_uuid': resource_provider.uuid,
                                           'error': exc})
    return inventory
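# An illustrative (hypothetical) use of make_inventory_object(): valid data
# yields an Inventory object, while a bad field value is caught and re-raised
# as HTTP 400. Here `rp` stands in for an existing ResourceProvider.
inv = make_inventory_object(rp, 'VCPU', total=8, allocation_ratio=16.0)
try:
    make_inventory_object(rp, 'VCPU', total='not-a-number')
except webob.exc.HTTPBadRequest:
    # The ValueError/TypeError raised by the Inventory field coercion is
    # wrapped into the HTTPBadRequest above.
    pass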
def _make_allocation(self, inv_dict, alloc_dict):
    rp = self._create_provider('allocation_resource_provider')
    disk_inv = rp_obj.Inventory(context=self.ctx,
                                resource_provider=rp, **inv_dict)
    inv_list = rp_obj.InventoryList(objects=[disk_inv])
    rp.set_inventory(inv_list)
    consumer_id = alloc_dict['consumer_id']
    try:
        c = consumer_obj.Consumer.get_by_uuid(self.ctx, consumer_id)
    except exception.NotFound:
        c = consumer_obj.Consumer(
            self.ctx, uuid=consumer_id, user=self.user_obj,
            project=self.project_obj)
        c.create()
    alloc = rp_obj.Allocation(self.ctx, resource_provider=rp,
                              consumer=c, **alloc_dict)
    alloc_list = rp_obj.AllocationList(self.ctx, objects=[alloc])
    alloc_list.replace_all()
    return rp, alloc
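# Note: the try/except above is the manual get-or-create pattern that the
# ensure_consumer() helper used in the earlier _make_allocation() variant
# appears to encapsulate.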
def start_fixture(self):
    super(AllocationFixture, self).start_fixture()
    self.context = context.get_admin_context()

    # For use creating and querying allocations/usages
    os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
    project_id = os.environ['PROJECT_ID']
    user_id = os.environ['USER_ID']
    alt_user_id = os.environ['ALT_USER_ID']

    user = user_obj.User(self.context, external_id=user_id)
    user.create()
    alt_user = user_obj.User(self.context, external_id=alt_user_id)
    alt_user.create()
    project = project_obj.Project(self.context, external_id=project_id)
    project.create()

    # Stealing from the super
    rp_name = os.environ['RP_NAME']
    rp_uuid = os.environ['RP_UUID']
    rp = rp_obj.ResourceProvider(
        self.context, name=rp_name, uuid=rp_uuid)
    rp.create()

    # Create a first consumer for the DISK_GB
    consumer_id = uuidutils.generate_uuid()
    consumer = consumer_obj.Consumer(
        self.context, uuid=consumer_id, user=user, project=project)
    consumer.create()

    # Create some DISK_GB inventory and allocations.
    inventory = rp_obj.Inventory(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', total=2048,
        step_size=10, min_unit=10, max_unit=600)
    inventory.obj_set_defaults()
    rp.add_inventory(inventory)
    alloc1 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', consumer=consumer, used=500)
    alloc2 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', consumer=consumer, used=500)
    alloc_list = rp_obj.AllocationList(
        self.context, objects=[alloc1, alloc2])
    alloc_list.create_all()

    # Create a second consumer for the VCPU
    consumer_id = uuidutils.generate_uuid()
    consumer = consumer_obj.Consumer(
        self.context, uuid=consumer_id, user=user, project=project)
    consumer.create()
    # This consumer is referenced from the gabbits
    os.environ['CONSUMER_ID'] = consumer_id

    # Create some VCPU inventory and allocations.
    inventory = rp_obj.Inventory(
        self.context, resource_provider=rp,
        resource_class='VCPU', total=10, max_unit=4)
    inventory.obj_set_defaults()
    rp.add_inventory(inventory)
    alloc1 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='VCPU', consumer=consumer, used=2)
    alloc2 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='VCPU', consumer=consumer, used=4)
    alloc_list = rp_obj.AllocationList(
        self.context, objects=[alloc1, alloc2])
    alloc_list.create_all()

    # Create a consumer object for a different user
    alt_consumer_id = uuidutils.generate_uuid()
    alt_consumer = consumer_obj.Consumer(
        self.context, uuid=alt_consumer_id, user=alt_user,
        project=project)
    alt_consumer.create()
    os.environ['ALT_CONSUMER_ID'] = alt_consumer_id

    # Create a couple of allocations for a different user.
    alloc1 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='DISK_GB', consumer=alt_consumer, used=20)
    alloc2 = rp_obj.Allocation(
        self.context, resource_provider=rp,
        resource_class='VCPU', consumer=alt_consumer, used=1)
    alloc_list = rp_obj.AllocationList(
        self.context, objects=[alloc1, alloc2])
    alloc_list.create_all()

    # The ALT_RP_XXX variables are for a resource provider that has
    # not been created in the Allocation fixture
    os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
    os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
def test_reshape_concurrent_inventory_update(self):
    """Valid failure scenario for reshape(). We test a situation where
    the virt driver has constructed its "after inventories and
    allocations" and sent those to the POST /reshape endpoint. The
    reshape POST handler does a quick check of the resource provider
    generations sent in the payload and they all check out.

    However, right before the call to resource_provider.reshape(),
    another thread legitimately changes the inventory of one of the
    providers involved in the reshape transaction. We should get a
    ConcurrentUpdateDetected in this case.
    """
    # First create our consumers
    i1_uuid = uuids.instance1
    i1_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i1_uuid, user=self.user_obj,
        project=self.project_obj)
    i1_consumer.create()

    # then all our original providers
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, 'VCPU', 16)
    tb.add_inventory(cn1, 'MEMORY_MB', 32768)
    tb.add_inventory(cn1, 'DISK_GB', 1000)

    # Allocate an instance on our compute node
    allocs = [
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='VCPU', consumer=i1_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='MEMORY_MB', consumer=i1_consumer,
            used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='DISK_GB', consumer=i1_consumer, used=100),
    ]
    alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
    alloc_list.replace_all()

    # Before we issue the actual reshape() call, we need to first create
    # the child providers and sharing storage provider. These are
    # actions that the virt driver or external agent is responsible for
    # performing *before* attempting any reshape activity.
    cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
    cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
    ss = self._create_provider('ss')

    # OK, now emulate the call to POST /reshaper that will be triggered
    # by a virt driver wanting to replace the world and change its
    # modeling from a single provider to a nested provider tree along
    # with a sharing storage provider.
    after_inventories = {
        # cn1 keeps the RAM only
        cn1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', total=32768, reserved=0,
                max_unit=32768, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # each NUMA node gets half of the CPUs
        cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa0,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa1,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # The sharing provider gets a bunch of disk
        ss: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=ss,
                resource_class='DISK_GB', total=100000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
    }

    # We do a fetch from the DB for the instance to get its latest
    # generation. This would be done by the resource tracker or
    # scheduler report client before issuing the call to reshape()
    # because the consumer representing the instance above will have
    # had its generation incremented in the original call to PUT
    # /allocations/{consumer_uuid}
    i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)

    after_allocs = rp_obj.AllocationList(self.ctx, objects=[
        # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and
        # DISK_GB from the sharing storage provider
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1_numa0,
            resource_class='VCPU', consumer=i1_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='MEMORY_MB', consumer=i1_consumer,
            used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=ss,
            resource_class='DISK_GB', consumer=i1_consumer, used=100),
    ])

    # OK, now before we call reshape(), here we emulate another thread
    # changing the inventory for the sharing storage provider in between
    # the time in the REST handler when the sharing storage provider's
    # generation was validated and the actual call to reshape()
    ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
    # Reduce the amount of storage to 2000, from 100000.
    new_ss_inv = rp_obj.InventoryList(self.ctx, objects=[
        rp_obj.Inventory(
            self.ctx, resource_provider=ss_threadB,
            resource_class='DISK_GB', total=2000, reserved=0,
            max_unit=1000, min_unit=1, step_size=1,
            allocation_ratio=1.0)
    ])
    ss_threadB.set_inventory(new_ss_inv)
    # Double check our storage provider's generation is now greater than
    # the original storage provider record being sent to reshape()
    self.assertGreater(ss_threadB.generation, ss.generation)

    # And we should legitimately get a failure now to reshape() due to
    # another thread updating one of the involved provider's generations
    self.assertRaises(
        exception.ConcurrentUpdateDetected,
        rp_obj.reshape, self.ctx, after_inventories, after_allocs)
def test_reshape(self):
    """We set up the following scenario:

    BEFORE: single compute node setup

      A single compute node with:
        - VCPU, MEMORY_MB, DISK_GB inventory
        - Two instances consuming CPU, RAM and DISK from that compute
          node

    AFTER: hierarchical + shared storage setup

      A compute node parent provider with:
        - MEMORY_MB
      Two NUMA node child providers containing:
        - VCPU
      Shared storage provider with:
        - DISK_GB
      Both instances have their resources split among the providers and
      shared storage accordingly
    """
    # First create our consumers
    i1_uuid = uuids.instance1
    i1_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i1_uuid, user=self.user_obj,
        project=self.project_obj)
    i1_consumer.create()

    i2_uuid = uuids.instance2
    i2_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i2_uuid, user=self.user_obj,
        project=self.project_obj)
    i2_consumer.create()

    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, 'VCPU', 16)
    tb.add_inventory(cn1, 'MEMORY_MB', 32768)
    tb.add_inventory(cn1, 'DISK_GB', 1000)

    # Allocate both instances against the single compute node
    for consumer in (i1_consumer, i2_consumer):
        allocs = [
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='VCPU', consumer=consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', consumer=consumer,
                used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='DISK_GB', consumer=consumer, used=100),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

    # Verify we have the allocations we expect for the BEFORE scenario
    before_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i1_uuid)
    self.assertEqual(3, len(before_allocs_i1))
    self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
    before_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i2_uuid)
    self.assertEqual(3, len(before_allocs_i2))
    self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)

    # Before we issue the actual reshape() call, we need to first create
    # the child providers and sharing storage provider. These are
    # actions that the virt driver or external agent is responsible for
    # performing *before* attempting any reshape activity.
    cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
    cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
    ss = self._create_provider('ss')

    # OK, now emulate the call to POST /reshaper that will be triggered
    # by a virt driver wanting to replace the world and change its
    # modeling from a single provider to a nested provider tree along
    # with a sharing storage provider.
    after_inventories = {
        # cn1 keeps the RAM only
        cn1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', total=32768, reserved=0,
                max_unit=32768, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # each NUMA node gets half of the CPUs
        cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa0,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=cn1_numa1,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
        # The sharing provider gets a bunch of disk
        ss: rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=ss,
                resource_class='DISK_GB', total=100000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ]),
    }

    # We do a fetch from the DB for each instance to get its latest
    # generation. This would be done by the resource tracker or
    # scheduler report client before issuing the call to reshape()
    # because the consumers representing the two instances above will
    # have had their generations incremented in the original call to
    # PUT /allocations/{consumer_uuid}
    i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
    i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)

    after_allocs = rp_obj.AllocationList(self.ctx, objects=[
        # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and
        # DISK_GB from the sharing storage provider
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1_numa0,
            resource_class='VCPU', consumer=i1_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='MEMORY_MB', consumer=i1_consumer,
            used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=ss,
            resource_class='DISK_GB', consumer=i1_consumer, used=100),
        # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and
        # DISK_GB from the sharing storage provider
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1_numa1,
            resource_class='VCPU', consumer=i2_consumer, used=2),
        rp_obj.Allocation(
            self.ctx, resource_provider=cn1,
            resource_class='MEMORY_MB', consumer=i2_consumer,
            used=1024),
        rp_obj.Allocation(
            self.ctx, resource_provider=ss,
            resource_class='DISK_GB', consumer=i2_consumer, used=100),
    ])
    rp_obj.reshape(self.ctx, after_inventories, after_allocs)

    # Verify that the inventories have been moved to the appropriate
    # providers in the AFTER scenario

    # The root compute node should only have MEMORY_MB, nothing else
    cn1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, cn1)
    self.assertEqual(1, len(cn1_inv))
    self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
    self.assertEqual(32768, cn1_inv[0].total)
    # Each NUMA node should only have half the original VCPU, nothing
    # else
    numa0_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, cn1_numa0)
    self.assertEqual(1, len(numa0_inv))
    self.assertEqual('VCPU', numa0_inv[0].resource_class)
    self.assertEqual(8, numa0_inv[0].total)
    numa1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, cn1_numa1)
    self.assertEqual(1, len(numa1_inv))
    self.assertEqual('VCPU', numa1_inv[0].resource_class)
    self.assertEqual(8, numa1_inv[0].total)
    # The sharing storage provider should only have DISK_GB, nothing
    # else
    ss_inv = rp_obj.InventoryList.get_all_by_resource_provider(
        self.ctx, ss)
    self.assertEqual(1, len(ss_inv))
    self.assertEqual('DISK_GB', ss_inv[0].resource_class)
    self.assertEqual(100000, ss_inv[0].total)

    # Verify we have the allocations we expect for the AFTER scenario
    after_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i1_uuid)
    self.assertEqual(3, len(after_allocs_i1))
    # Our VCPU allocation should be in the NUMA0 node
    vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

    after_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
        self.ctx, i2_uuid)
    self.assertEqual(3, len(after_allocs_i2))
    # Our VCPU allocation should be in the NUMA1 node
    vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
def start_fixture(self):
    super(SharedStorageFixture, self).start_fixture()
    self.context = context.get_admin_context()

    cn1_uuid = uuidutils.generate_uuid()
    cn2_uuid = uuidutils.generate_uuid()
    ss_uuid = uuidutils.generate_uuid()
    agg_uuid = uuidutils.generate_uuid()
    os.environ['CN1_UUID'] = cn1_uuid
    os.environ['CN2_UUID'] = cn2_uuid
    os.environ['SS_UUID'] = ss_uuid
    os.environ['AGG_UUID'] = agg_uuid

    cn1 = rp_obj.ResourceProvider(
        self.context, name='cn1', uuid=cn1_uuid)
    cn1.create()

    cn2 = rp_obj.ResourceProvider(
        self.context, name='cn2', uuid=cn2_uuid)
    cn2.create()

    ss = rp_obj.ResourceProvider(
        self.context, name='ss', uuid=ss_uuid)
    ss.create()

    # Populate compute node inventory for VCPU and RAM
    for cn in (cn1, cn2):
        vcpu_inv = rp_obj.Inventory(
            self.context, resource_provider=cn,
            resource_class='VCPU', total=24, reserved=0,
            max_unit=24, min_unit=1, step_size=1,
            allocation_ratio=16.0)
        vcpu_inv.obj_set_defaults()
        ram_inv = rp_obj.Inventory(
            self.context, resource_provider=cn,
            resource_class='MEMORY_MB', total=128 * 1024,
            reserved=0, max_unit=128 * 1024, min_unit=256,
            step_size=256, allocation_ratio=1.5)
        ram_inv.obj_set_defaults()
        inv_list = rp_obj.InventoryList(objects=[vcpu_inv, ram_inv])
        cn.set_inventory(inv_list)

    t_avx_sse = rp_obj.Trait.get_by_name(self.context, "HW_CPU_X86_SSE")
    t_avx_sse2 = rp_obj.Trait.get_by_name(self.context, "HW_CPU_X86_SSE2")
    cn1.set_traits(rp_obj.TraitList(objects=[t_avx_sse, t_avx_sse2]))

    # Populate shared storage provider with DISK_GB inventory
    disk_inv = rp_obj.Inventory(
        self.context, resource_provider=ss,
        resource_class='DISK_GB', total=2000, reserved=100,
        max_unit=2000, min_unit=10, step_size=10,
        allocation_ratio=1.0)
    disk_inv.obj_set_defaults()
    inv_list = rp_obj.InventoryList(objects=[disk_inv])
    ss.set_inventory(inv_list)

    # Mark the shared storage pool as having inventory shared among any
    # provider associated via aggregate
    t = rp_obj.Trait.get_by_name(
        self.context,
        "MISC_SHARES_VIA_AGGREGATE",
    )
    ss.set_traits(rp_obj.TraitList(objects=[t]))

    # Now associate the shared storage pool and both compute nodes with
    # the same aggregate
    cn1.set_aggregates([agg_uuid])
    cn2.set_aggregates([agg_uuid])
    ss.set_aggregates([agg_uuid])