def test_create_incomplete_consumers(self):
    """Test the online data migration that creates incomplete consumer
    records along with the incomplete consumer project/user records.
    """
    self._create_incomplete_allocations(self.ctx)
    # We do a "really online" online data migration for incomplete
    # consumers when calling alloc_obj.get_all_by_consumer_id() and
    # alloc_obj.get_all_by_resource_provider() and there are still
    # incomplete consumer records. So, to simulate a situation where the
    # operator has yet to run the nova-manage online_data_migration CLI
    # tool completely, we first call
    # consumer_obj.create_incomplete_consumers() with a batch size of 1.
    # This should mean there will be two allocation records still remaining
    # with a missing consumer record (since we create 3 total to begin
    # with). We then query the allocations table directly to grab that
    # consumer UUID in the allocations table that doesn't refer to a
    # consumer table record and call
    # alloc_obj.get_all_by_consumer_id() with that consumer UUID. This
    # should create the remaining missing consumer record "inline" in the
    # alloc_obj.get_all_by_consumer_id() method.
    # After that happens, there should still be a single allocation record
    # that is missing a relation to the consumers table. We call the
    # alloc_obj.get_all_by_resource_provider() method and verify that
    # method cleans up the remaining incomplete consumers relationship.
    res = consumer_obj.create_incomplete_consumers(self.ctx, 1)
    self.assertEqual((1, 1), res)
    # Grab the consumer UUID for the allocation record with a
    # still-incomplete consumer record.
    res = _get_allocs_with_no_consumer_relationship(self.ctx)
    self.assertEqual(2, len(res))
    still_missing = res[0][0]
    alloc_obj.get_all_by_consumer_id(self.ctx, still_missing)
    # There should still be a single missing consumer relationship. Let's
    # grab that and call alloc_obj.get_all_by_resource_provider()
    # which should clean that last one up for us.
    res = _get_allocs_with_no_consumer_relationship(self.ctx)
    self.assertEqual(1, len(res))
    still_missing = res[0][0]
    rp1 = rp_obj.ResourceProvider(self.ctx, id=1)
    alloc_obj.get_all_by_resource_provider(self.ctx, rp1)
    # get_all_by_resource_provider() should have auto-completed the still
    # missing consumer record and _check_incomplete_consumers() should
    # assert correctly that there are no more incomplete consumer records.
    self._check_incomplete_consumers(self.ctx)
    # A final migration pass should find nothing left to migrate.
    res = consumer_obj.create_incomplete_consumers(self.ctx, 10)
    self.assertEqual((0, 0), res)
def delete_allocations(req):
    """Delete every allocation belonging to the consumer in the URL path.

    Responds with 204 and an empty body on success. Responds with 404
    when the consumer has no allocations at all, or when the allocations
    vanished concurrently while being deleted.
    """
    ctx = req.environ['placement.context']
    ctx.can(policies.ALLOC_DELETE)
    consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
    allocations = alloc_obj.get_all_by_consumer_id(ctx, consumer_uuid)
    # Guard clause: nothing to delete is a 404, matching the API contract.
    if not allocations:
        raise webob.exc.HTTPNotFound(
            "No allocations for consumer '%(consumer_uuid)s'" %
            {'consumer_uuid': consumer_uuid})
    try:
        alloc_obj.delete_all(ctx, allocations)
    # NOTE(pumaranikar): Following NotFound exception added in the case
    # when allocation is deleted from allocations list by some other
    # activity. In that case, delete_all() will throw a NotFound exception.
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            "Allocation for consumer with id %(id)s not found. error: "
            "%(error)s" % {'id': consumer_uuid, 'error': exc})
    LOG.debug("Successfully deleted allocations %s", allocations)
    req.response.status = 204
    req.response.content_type = None
    return req.response
def create_allocation_list(context, data, consumers):
    """Create a list of Allocations based on provided data.

    :param context: The placement context.
    :param data: A dictionary of multiple allocations by consumer uuid.
    :param consumers: A dictionary, keyed by consumer UUID, of Consumer
                      objects
    :return: A list of Allocation objects.
    :raises: `webob.exc.HTTPBadRequest` if a resource provider included in
             the allocations does not exist.
    """
    allocation_objects = []
    for consumer_uuid, entry in data.items():
        allocations = entry['allocations']
        consumer = consumers[consumer_uuid]
        if not allocations:
            # The allocations are empty, which means wipe them out.
            # Internal to the allocation object this is signalled by a
            # used value of 0.
            existing = alloc_obj.get_all_by_consumer_id(
                context, consumer_uuid)
            for allocation in existing:
                allocation.used = 0
                allocation_objects.append(allocation)
            continue
        # Resolve every referenced provider up front; a missing provider
        # is reported by the helper.
        rp_objs = _resource_providers_by_uuid(context, allocations.keys())
        for rp_uuid, alloc_body in allocations.items():
            new_allocations = _new_allocations(
                context, rp_objs[rp_uuid], consumer, alloc_body['resources'])
            allocation_objects.extend(new_allocations)
    return allocation_objects
def list_for_consumer(req):
    """List allocations associated with a consumer."""
    context = req.environ['placement.context']
    context.can(policies.ALLOC_LIST)
    consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    # NOTE(cdent): There is no way for a 404 to be returned here,
    # only an empty result. We do not have a way to validate a
    # consumer id.
    allocations = alloc_obj.get_all_by_consumer_id(context, consumer_id)
    serialized = _serialize_allocations_for_consumer(
        context, allocations, want_version)
    last_modified = _last_modified_from_allocations(
        allocations, want_version)
    response = req.response
    response.status = 200
    response.body = encodeutils.to_utf8(jsonutils.dumps(serialized))
    response.content_type = 'application/json'
    # Cache/last-modified headers only exist from microversion 1.15 on.
    if want_version.matches((1, 15)):
        response.last_modified = last_modified
        response.cache_control = 'no-cache'
    return response
def test_get_all_by_consumer_id(self, mock_get_allocations_from_db):
    # NOTE(review): mock_get_allocations_from_db is presumably injected by
    # a mock.patch decorator just above this method (outside this chunk)
    # — confirm against the full file.
    allocations = alloc_obj.get_all_by_consumer_id(
        self.context, uuids.consumer)
    # The mocked DB layer yields exactly one allocation record.
    self.assertEqual(1, len(allocations))
    # The consumer UUID must be passed through to the DB layer untouched.
    mock_get_allocations_from_db.assert_called_once_with(
        self.context, uuids.consumer)
    # Field values from the DB row must be copied onto the object verbatim.
    self.assertEqual(
        _ALLOCATION_BY_CONSUMER_DB['used'], allocations[0].used)
    self.assertEqual(
        _ALLOCATION_BY_CONSUMER_DB['created_at'], allocations[0].created_at)
    self.assertEqual(
        _ALLOCATION_BY_CONSUMER_DB['updated_at'], allocations[0].updated_at)
def test_delete_consumer_if_no_allocs(self):
    """alloc_obj.replace_all() should attempt to delete consumers that no
    longer have any allocations. Due to the REST API not having any way to
    query for consumers directly (only via the GET
    /allocations/{consumer_uuid} endpoint which returns an empty dict even
    when no consumer record exists for the {consumer_uuid}) we need to do
    this functional test using only the object layer.
    """
    # We will use two consumers in this test, only one of which will get
    # all of its allocations deleted in a transaction (and we expect that
    # consumer record to be deleted)
    c1 = consumer_obj.Consumer(
        self.ctx, uuid=uuids.consumer1, user=self.user_obj,
        project=self.project_obj)
    c1.create()
    c2 = consumer_obj.Consumer(
        self.ctx, uuid=uuids.consumer2, user=self.user_obj,
        project=self.project_obj)
    c2.create()
    # Create some inventory that we will allocate
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, orc.VCPU, 8)
    tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
    tb.add_inventory(cn1, orc.DISK_GB, 2000)
    # Now allocate some of that inventory to two different consumers
    allocs = [
        alloc_obj.Allocation(
            consumer=c1, resource_provider=cn1,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=c1, resource_provider=cn1,
            resource_class=orc.MEMORY_MB, used=512),
        alloc_obj.Allocation(
            consumer=c2, resource_provider=cn1,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=c2, resource_provider=cn1,
            resource_class=orc.MEMORY_MB, used=512),
    ]
    alloc_obj.replace_all(self.ctx, allocs)
    # Validate that we have consumer records for both consumers
    for c_uuid in (uuids.consumer1, uuids.consumer2):
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
        self.assertIsNotNone(c_obj)
    # OK, now "remove" the allocation for consumer2 by setting the used
    # value for both allocated resources to 0 and re-running the
    # alloc_obj.replace_all(). This should end up deleting the
    # consumer record for consumer2
    allocs = [
        alloc_obj.Allocation(
            consumer=c2, resource_provider=cn1,
            resource_class=orc.VCPU, used=0),
        alloc_obj.Allocation(
            consumer=c2, resource_provider=cn1,
            resource_class=orc.MEMORY_MB, used=0),
    ]
    alloc_obj.replace_all(self.ctx, allocs)
    # consumer1 should still exist...
    c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
    self.assertIsNotNone(c_obj)
    # but not consumer2...
    self.assertRaises(
        exception.NotFound, consumer_obj.Consumer.get_by_uuid,
        self.ctx, uuids.consumer2)
    # DELETE /allocations/{consumer_uuid} is the other place where we
    # delete all allocations for a consumer. Let's delete all for consumer1
    # and check that the consumer record is deleted
    alloc_list = alloc_obj.get_all_by_consumer_id(
        self.ctx, uuids.consumer1)
    alloc_obj.delete_all(self.ctx, alloc_list)
    # consumer1 should no longer exist in the DB since we just deleted all
    # of its allocations
    self.assertRaises(
        exception.NotFound, consumer_obj.Consumer.get_by_uuid,
        self.ctx, uuids.consumer1)
def test_multi_provider_allocation(self):
    """Tests that an allocation that includes more than one resource
    provider can be created, listed and deleted properly.

    Bug #1707669 highlighted a situation that arose when attempting to
    remove part of an allocation for a source host during a resize
    operation where the existing allocation was not being properly
    deleted.
    """
    cn_source = self._create_provider('cn_source')
    cn_dest = self._create_provider('cn_dest')
    # Add same inventory to both source and destination host
    for cn in (cn_source, cn_dest):
        tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0)
        tb.add_inventory(cn, orc.MEMORY_MB, 1024, min_unit=64,
                         max_unit=1024, step_size=64,
                         allocation_ratio=1.5)
    # Create a consumer representing the instance
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()
    # Now create an allocation that represents a move operation where the
    # scheduler has selected cn_dest as the target host and created a
    # "doubled-up" allocation for the duration of the move operation
    alloc_list = [
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_source,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_source,
            resource_class=orc.MEMORY_MB, used=256),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.MEMORY_MB, used=256),
    ]
    alloc_obj.replace_all(self.ctx, alloc_list)
    # Both hosts should now show two allocations each, and the consumer
    # should own all four.
    src_allocs = alloc_obj.get_all_by_resource_provider(
        self.ctx, cn_source)
    self.assertEqual(2, len(src_allocs))
    dest_allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn_dest)
    self.assertEqual(2, len(dest_allocs))
    consumer_allocs = alloc_obj.get_all_by_consumer_id(
        self.ctx, uuidsentinel.instance)
    self.assertEqual(4, len(consumer_allocs))
    # Validate that when we create an allocation for a consumer that we
    # delete any existing allocation and replace it with the new one.
    # Here, we're emulating the step that occurs on confirm_resize() where
    # the source host pulls the existing allocation for the instance and
    # removes any resources that refer to itself and saves the allocation
    # back to placement
    new_alloc_list = [
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.VCPU, used=1),
        alloc_obj.Allocation(
            consumer=inst_consumer, resource_provider=cn_dest,
            resource_class=orc.MEMORY_MB, used=256),
    ]
    alloc_obj.replace_all(self.ctx, new_alloc_list)
    # The source host's allocations should now be gone entirely.
    src_allocs = alloc_obj.get_all_by_resource_provider(
        self.ctx, cn_source)
    self.assertEqual(0, len(src_allocs))
    dest_allocs = alloc_obj.get_all_by_resource_provider(
        self.ctx, cn_dest)
    self.assertEqual(2, len(dest_allocs))
    consumer_allocs = alloc_obj.get_all_by_consumer_id(
        self.ctx, uuidsentinel.instance)
    self.assertEqual(2, len(consumer_allocs))
def test_create_and_clear(self):
    """Test that a used of 0 in an allocation wipes allocations."""
    consumer_uuid = uuidsentinel.consumer
    # Create a consumer representing the instance
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=consumer_uuid, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()
    rp_class = orc.DISK_GB
    target_rp = self._make_rp_and_inventory(resource_class=rp_class,
                                            max_unit=500)
    # Create two allocations with values and confirm the resulting
    # usage is as expected.
    allocation1 = alloc_obj.Allocation(
        resource_provider=target_rp, consumer=inst_consumer,
        resource_class=rp_class, used=100)
    allocation2 = alloc_obj.Allocation(
        resource_provider=target_rp, consumer=inst_consumer,
        resource_class=rp_class, used=200)
    allocation_list = [allocation1, allocation2]
    alloc_obj.replace_all(self.ctx, allocation_list)
    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(2, len(allocations))
    usage = sum(alloc.used for alloc in allocations)
    self.assertEqual(300, usage)
    # Create two allocations, one with 0 used, to confirm the
    # resulting usage is only of one.
    allocation1 = alloc_obj.Allocation(
        resource_provider=target_rp, consumer=inst_consumer,
        resource_class=rp_class, used=0)
    allocation2 = alloc_obj.Allocation(
        resource_provider=target_rp, consumer=inst_consumer,
        resource_class=rp_class, used=200)
    allocation_list = [allocation1, allocation2]
    alloc_obj.replace_all(self.ctx, allocation_list)
    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(1, len(allocations))
    usage = allocations[0].used
    self.assertEqual(200, usage)
    # add a source rp and a migration consumer
    migration_uuid = uuidsentinel.migration
    # Create a consumer representing the migration
    mig_consumer = consumer_obj.Consumer(
        self.ctx, uuid=migration_uuid, user=self.user_obj,
        project=self.project_obj)
    mig_consumer.create()
    source_rp = self._make_rp_and_inventory(
        rp_name=uuidsentinel.source_name, rp_uuid=uuidsentinel.source_uuid,
        resource_class=rp_class, max_unit=500)
    # Create two allocations, one as the consumer, one as the
    # migration.
    allocation1 = alloc_obj.Allocation(
        resource_provider=target_rp, consumer=inst_consumer,
        resource_class=rp_class, used=200)
    allocation2 = alloc_obj.Allocation(
        resource_provider=source_rp, consumer=mig_consumer,
        resource_class=rp_class, used=200)
    allocation_list = [allocation1, allocation2]
    alloc_obj.replace_all(self.ctx, allocation_list)
    # Check primary consumer allocations.
    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(1, len(allocations))
    usage = allocations[0].used
    self.assertEqual(200, usage)
    # Check migration allocations.
    allocations = alloc_obj.get_all_by_consumer_id(
        self.ctx, migration_uuid)
    self.assertEqual(1, len(allocations))
    usage = allocations[0].used
    self.assertEqual(200, usage)
    # Clear the migration and confirm the target.
    allocation1 = alloc_obj.Allocation(
        resource_provider=target_rp, consumer=inst_consumer,
        resource_class=rp_class, used=200)
    allocation2 = alloc_obj.Allocation(
        resource_provider=source_rp, consumer=mig_consumer,
        resource_class=rp_class, used=0)
    allocation_list = [allocation1, allocation2]
    alloc_obj.replace_all(self.ctx, allocation_list)
    allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
    self.assertEqual(1, len(allocations))
    usage = allocations[0].used
    self.assertEqual(200, usage)
    # The migration consumer's allocation was zeroed, so it is gone.
    allocations = alloc_obj.get_all_by_consumer_id(
        self.ctx, migration_uuid)
    self.assertEqual(0, len(allocations))
def test_allocation_list_create(self):
    """End-to-end check of replace_all(): inventory validation failures,
    successful allocation, usage accounting, and deletion.
    """
    max_unit = 10
    consumer_uuid = uuidsentinel.consumer
    # Create a consumer representing the instance
    inst_consumer = consumer_obj.Consumer(
        self.ctx, uuid=consumer_uuid, user=self.user_obj,
        project=self.project_obj)
    inst_consumer.create()
    # Create two resource providers
    rp1_name = uuidsentinel.rp1_name
    rp1_uuid = uuidsentinel.rp1_uuid
    rp1_class = orc.DISK_GB
    rp1_used = 6
    rp2_name = uuidsentinel.rp2_name
    rp2_uuid = uuidsentinel.rp2_uuid
    rp2_class = orc.IPV4_ADDRESS
    rp2_used = 2
    rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
    rp2 = self._create_provider(rp2_name, uuid=rp2_uuid)
    # Two allocations, one for each resource provider.
    allocation_1 = alloc_obj.Allocation(
        resource_provider=rp1, consumer=inst_consumer,
        resource_class=rp1_class, used=rp1_used)
    allocation_2 = alloc_obj.Allocation(
        resource_provider=rp2, consumer=inst_consumer,
        resource_class=rp2_class, used=rp2_used)
    allocation_list = [allocation_1, allocation_2]
    # There's no inventory, we have a failure.
    error = self.assertRaises(exception.InvalidInventory,
                              alloc_obj.replace_all, self.ctx,
                              allocation_list)
    # Confirm that the resource class string, not index, is in
    # the exception and resource providers are listed by uuid.
    self.assertIn(rp1_class, str(error))
    self.assertIn(rp2_class, str(error))
    self.assertIn(rp1.uuid, str(error))
    self.assertIn(rp2.uuid, str(error))
    # Add inventory for one of the two resource providers. This should also
    # fail, since rp2 has no inventory.
    tb.add_inventory(rp1, rp1_class, 1024, max_unit=1)
    self.assertRaises(exception.InvalidInventory,
                      alloc_obj.replace_all, self.ctx, allocation_list)
    # Add inventory for the second resource provider
    tb.add_inventory(rp2, rp2_class, 255, reserved=2, max_unit=1)
    # Now the allocations will still fail because max_unit 1
    self.assertRaises(exception.InvalidAllocationConstraintsViolated,
                      alloc_obj.replace_all, self.ctx, allocation_list)
    # Raise max_unit on both providers so the requested amounts fit.
    inv1 = inv_obj.Inventory(resource_provider=rp1,
                             resource_class=rp1_class,
                             total=1024, max_unit=max_unit)
    rp1.set_inventory([inv1])
    inv2 = inv_obj.Inventory(resource_provider=rp2,
                             resource_class=rp2_class,
                             total=255, reserved=2, max_unit=max_unit)
    rp2.set_inventory([inv2])
    # Now we can finally allocate.
    alloc_obj.replace_all(self.ctx, allocation_list)
    # Check that those allocations changed usage on each
    # resource provider.
    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp2_uuid)
    self.assertEqual(rp1_used, rp1_usage[0].usage)
    self.assertEqual(rp2_used, rp2_usage[0].usage)
    # redo one allocation
    # TODO(cdent): This does not currently behave as expected
    # because a new allocation is created, adding to the total
    # used, not replacing.
    rp1_used += 1
    self.allocate_from_provider(
        rp1, rp1_class, rp1_used, consumer=inst_consumer)
    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    self.assertEqual(rp1_used, rp1_usage[0].usage)
    # delete the allocations for the consumer
    # NOTE(cdent): The database uses 'consumer_id' for the
    # column, presumably because some ids might not be uuids, at
    # some point in the future.
    consumer_allocations = alloc_obj.get_all_by_consumer_id(
        self.ctx, consumer_uuid)
    alloc_obj.delete_all(self.ctx, consumer_allocations)
    # After deletion both providers report zero usage.
    rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp1_uuid)
    rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
        self.ctx, rp2_uuid)
    self.assertEqual(0, rp1_usage[0].usage)
    self.assertEqual(0, rp2_usage[0].usage)
def _set_allocations_for_consumer(req, schema):
    """Create or replace all allocations for the consumer in the URL path.

    Validates the consumer UUID and request body against ``schema``,
    normalizes pre-1.12 list-style allocation bodies to the dict form,
    ensures the consumer record exists, then writes the allocations.
    Returns a 204 with no body on success. Raises HTTPBadRequest for a
    malformed consumer UUID or a missing resource provider/inventory,
    and HTTPConflict for capacity or concurrent-update failures.
    """
    context = req.environ['placement.context']
    context.can(policies.ALLOC_UPDATE)
    consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
    if not uuidutils.is_uuid_like(consumer_uuid):
        raise webob.exc.HTTPBadRequest(
            'Malformed consumer_uuid: %(consumer_uuid)s' %
            {'consumer_uuid': consumer_uuid})
    # Canonicalize the UUID string form.
    consumer_uuid = str(uuid.UUID(consumer_uuid))
    data = util.extract_json(req.body, schema)
    allocation_data = data['allocations']
    # Normalize allocation data to dict.
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    if not want_version.matches((1, 12)):
        allocations_dict = {}
        # Allocation are list-ish, transform to dict-ish
        for allocation in allocation_data:
            resource_provider_uuid = allocation['resource_provider']['uuid']
            allocations_dict[resource_provider_uuid] = {
                'resources': allocation['resources']
            }
        allocation_data = allocations_dict
    allocation_objects = []
    # Consumer object saved in case we need to delete the auto-created
    # consumer record
    consumer = None
    # Whether we created a new consumer record
    created_new_consumer = False
    if not allocation_data:
        # The allocations are empty, which means wipe them out. Internal
        # to the allocation object this is signalled by a used value of 0.
        # We still need to verify the consumer's generation, though, which
        # we do in _ensure_consumer()
        # NOTE(jaypipes): This will only occur 1.28+. The JSONSchema will
        # prevent an empty allocations object from being passed when there
        # is no consumer generation, so this is safe to do.
        data_util.ensure_consumer(
            context, consumer_uuid, data.get('project_id'),
            data.get('user_id'), data.get('consumer_generation'),
            want_version)
        allocations = alloc_obj.get_all_by_consumer_id(
            context, consumer_uuid)
        for allocation in allocations:
            allocation.used = 0
            allocation_objects.append(allocation)
    else:
        # If the body includes an allocation for a resource provider
        # that does not exist, raise a 400.
        rp_objs = _resource_providers_by_uuid(
            context, allocation_data.keys())
        consumer, created_new_consumer = data_util.ensure_consumer(
            context, consumer_uuid, data.get('project_id'),
            data.get('user_id'), data.get('consumer_generation'),
            want_version)
        for resource_provider_uuid, allocation in allocation_data.items():
            resource_provider = rp_objs[resource_provider_uuid]
            new_allocations = _new_allocations(
                context, resource_provider, consumer,
                allocation['resources'])
            allocation_objects.extend(new_allocations)

    def _create_allocations(alloc_list):
        # Write the allocations; if the write fails and we auto-created
        # the consumer record above, roll that record back before
        # re-raising so a failed PUT leaves no orphan consumer.
        try:
            alloc_obj.replace_all(context, alloc_list)
            LOG.debug("Successfully wrote allocations %s", alloc_list)
        except Exception:
            with excutils.save_and_reraise_exception():
                if created_new_consumer:
                    delete_consumers([consumer])

    try:
        _create_allocations(allocation_objects)
    # InvalidInventory is a parent for several exceptions that
    # indicate either that Inventory is not present, or that
    # capacity limits have been exceeded.
    except exception.NotFound as exc:
        raise webob.exc.HTTPBadRequest(
            "Unable to allocate inventory for consumer %(consumer_uuid)s: "
            "%(error)s" % {'consumer_uuid': consumer_uuid, 'error': exc})
    except exception.InvalidInventory as exc:
        raise webob.exc.HTTPConflict(
            'Unable to allocate inventory: %(error)s' % {'error': exc})
    except exception.ConcurrentUpdateDetected as exc:
        raise webob.exc.HTTPConflict(
            'Inventory and/or allocations changed while attempting to '
            'allocate: %(error)s' % {'error': exc},
            comment=errors.CONCURRENT_UPDATE)
    req.response.status = 204
    req.response.content_type = None
    return req.response
def test_reshape(self):
    """We set up the following scenario:

    BEFORE: single compute node setup

      A single compute node with:
        - VCPU, MEMORY_MB, DISK_GB inventory
        - Two instances consuming CPU, RAM and DISK from that compute node

    AFTER: hierarchical + shared storage setup

      A compute node parent provider with:
        - MEMORY_MB
      Two NUMA node child providers containing:
        - VCPU
      Shared storage provider with:
        - DISK_GB
      Both instances have their resources split among the providers and
      shared storage accordingly
    """
    # First create our consumers
    i1_uuid = uuids.instance1
    i1_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i1_uuid, user=self.user_obj,
        project=self.project_obj)
    i1_consumer.create()
    i2_uuid = uuids.instance2
    i2_consumer = consumer_obj.Consumer(
        self.ctx, uuid=i2_uuid, user=self.user_obj,
        project=self.project_obj)
    i2_consumer.create()
    cn1 = self._create_provider('cn1')
    tb.add_inventory(cn1, 'VCPU', 16)
    tb.add_inventory(cn1, 'MEMORY_MB', 32768)
    tb.add_inventory(cn1, 'DISK_GB', 1000)
    # Allocate both instances against the single compute node
    for consumer in (i1_consumer, i2_consumer):
        allocs = [
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='VCPU',
                consumer=consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='DISK_GB',
                consumer=consumer, used=100),
        ]
        alloc_obj.replace_all(self.ctx, allocs)
    # Verify we have the allocations we expect for the BEFORE scenario
    before_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
    self.assertEqual(3, len(before_allocs_i1))
    self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
    before_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
    self.assertEqual(3, len(before_allocs_i2))
    self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)
    # Before we issue the actual reshape() call, we need to first create
    # the child providers and sharing storage provider. These are actions
    # that the virt driver or external agent is responsible for performing
    # *before* attempting any reshape activity.
    cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
    cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
    ss = self._create_provider('ss')
    # OK, now emulate the call to POST /reshaper that will be triggered by
    # a virt driver wanting to replace the world and change its modeling
    # from a single provider to a nested provider tree along with a
    # sharing storage provider.
    after_inventories = {
        # cn1 keeps the RAM only
        cn1: [
            inv_obj.Inventory(
                resource_provider=cn1,
                resource_class='MEMORY_MB', total=32768, reserved=0,
                max_unit=32768, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        # each NUMA node gets half of the CPUs
        cn1_numa0: [
            inv_obj.Inventory(
                resource_provider=cn1_numa0,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        cn1_numa1: [
            inv_obj.Inventory(
                resource_provider=cn1_numa1,
                resource_class='VCPU', total=8, reserved=0,
                max_unit=8, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
        # The sharing provider gets a bunch of disk
        ss: [
            inv_obj.Inventory(
                resource_provider=ss,
                resource_class='DISK_GB', total=100000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0),
        ],
    }
    # We do a fetch from the DB for each instance to get its latest
    # generation. This would be done by the resource tracker or scheduler
    # report client before issuing the call to reshape() because the
    # consumers representing the two instances above will have had their
    # generations incremented in the original call to PUT
    # /allocations/{consumer_uuid}
    i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
    i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)
    after_allocs = [
        # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        alloc_obj.Allocation(
            resource_provider=cn1_numa0, resource_class='VCPU',
            consumer=i1_consumer, used=2),
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i1_consumer, used=1024),
        alloc_obj.Allocation(
            resource_provider=ss, resource_class='DISK_GB',
            consumer=i1_consumer, used=100),
        # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
        # from the sharing storage provider
        alloc_obj.Allocation(
            resource_provider=cn1_numa1, resource_class='VCPU',
            consumer=i2_consumer, used=2),
        alloc_obj.Allocation(
            resource_provider=cn1, resource_class='MEMORY_MB',
            consumer=i2_consumer, used=1024),
        alloc_obj.Allocation(
            resource_provider=ss, resource_class='DISK_GB',
            consumer=i2_consumer, used=100),
    ]
    reshaper.reshape(self.ctx, after_inventories, after_allocs)
    # Verify that the inventories have been moved to the appropriate
    # providers in the AFTER scenario
    # The root compute node should only have MEMORY_MB, nothing else
    cn1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1)
    self.assertEqual(1, len(cn1_inv))
    self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
    self.assertEqual(32768, cn1_inv[0].total)
    # Each NUMA node should only have half the original VCPU, nothing else
    numa0_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa0)
    self.assertEqual(1, len(numa0_inv))
    self.assertEqual('VCPU', numa0_inv[0].resource_class)
    self.assertEqual(8, numa0_inv[0].total)
    numa1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa1)
    self.assertEqual(1, len(numa1_inv))
    self.assertEqual('VCPU', numa1_inv[0].resource_class)
    self.assertEqual(8, numa1_inv[0].total)
    # The sharing storage provider should only have DISK_GB, nothing else
    ss_inv = inv_obj.get_all_by_resource_provider(self.ctx, ss)
    self.assertEqual(1, len(ss_inv))
    self.assertEqual('DISK_GB', ss_inv[0].resource_class)
    self.assertEqual(100000, ss_inv[0].total)
    # Verify we have the allocations we expect for the AFTER scenario
    after_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
    self.assertEqual(3, len(after_allocs_i1))
    # Our VCPU allocation should be in the NUMA0 node
    vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
    after_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
    self.assertEqual(3, len(after_allocs_i2))
    # Our VCPU allocation should be in the NUMA1 node
    vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
    self.assertIsNotNone(vcpu_alloc)
    self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
    # Our DISK_GB allocation should be in the sharing provider
    disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
    self.assertIsNotNone(disk_alloc)
    self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
    # And our MEMORY_MB should remain on the root compute node
    ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
    self.assertIsNotNone(ram_alloc)
    self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)