    def test_allocation_checking(self):
        """Test that allocation check logic works with 2 resource classes on
        one provider.

        If this fails, we get a KeyError at replace_all()
        """

        max_unit = 10
        consumer_uuid = uuidsentinel.consumer
        consumer_uuid2 = uuidsentinel.consumer2

        # Create two consumers representing two instances
        consumer = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid, user=self.user_obj,
            project=self.project_obj)
        consumer.create()
        consumer2 = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid2, user=self.user_obj,
            project=self.project_obj)
        consumer2.create()

        # Create one resource provider with 2 classes
        rp1_name = uuidsentinel.rp1_name
        rp1_uuid = uuidsentinel.rp1_uuid
        rp1_class = orc.DISK_GB
        rp1_used = 6

        rp2_class = orc.IPV4_ADDRESS
        rp2_used = 2

        rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
        tb.add_inventory(rp1, rp1_class, 1024, max_unit=max_unit)
        tb.add_inventory(rp1, rp2_class, 255, reserved=2, max_unit=max_unit)

        # create the allocations for a first consumer
        allocation_1 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer, resource_class=rp1_class,
            used=rp1_used)
        allocation_2 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer, resource_class=rp2_class,
            used=rp2_used)
        allocation_list = [allocation_1, allocation_2]
        alloc_obj.replace_all(self.ctx, allocation_list)

        # Create the allocations for a second consumer. Until we have
        # allocations for more than one consumer in the db, we won't
        # actually be doing real allocation math, which is what triggers
        # the sql monster.
        allocation_1 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer2,
            resource_class=rp1_class, used=rp1_used)
        allocation_2 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer2,
            resource_class=rp2_class, used=rp2_used)
        allocation_list = [allocation_1, allocation_2]
        # If we are joining wrong, this will be a KeyError
        alloc_obj.replace_all(self.ctx, allocation_list)
Example #2
def set_allocation(ctx, rp, consumer, rc_used_dict):
    alloc = [
        alloc_obj.Allocation(
            resource_provider=rp, resource_class=rc,
            consumer=consumer, used=used)
        for rc, used in rc_used_dict.items()
    ]
    alloc_obj.replace_all(ctx, alloc)
    return alloc
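
A minimal usage sketch for set_allocation() above (illustrative only: ctx, rp and consumer are assumed to already exist, created the same way as in the surrounding tests):

# Allocate 4 VCPU and 512 MEMORY_MB to one consumer in a single
# replace_all() call via the helper above.
allocs = set_allocation(ctx, rp, consumer, {
    orc.VCPU: 4,
    orc.MEMORY_MB: 512,
})
assert len(allocs) == 2
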
Example #3
    def _make_allocation(self, inv_dict, alloc_dict):
        alloc_dict = copy.copy(alloc_dict)
        rp = self._create_provider('allocation_resource_provider')
        disk_inv = inv_obj.Inventory(resource_provider=rp, **inv_dict)
        rp.set_inventory([disk_inv])
        consumer_id = alloc_dict.pop('consumer_id')
        consumer = ensure_consumer(
            self.ctx, self.user_obj, self.project_obj, consumer_id)
        alloc = alloc_obj.Allocation(
            resource_provider=rp, consumer=consumer, **alloc_dict)
        alloc_obj.replace_all(self.ctx, [alloc])
        return rp, alloc
Example #4
def _new_allocations(context, resource_provider, consumer, resources):
    """Create new allocation objects for a set of resources

    Returns a list of Allocation objects

    :param context: The placement context.
    :param resource_provider: The resource provider that has the resources.
    :param consumer: The Consumer object consuming the resources.
    :param resources: A dict of resource classes and values.
    """
    allocations = []
    for resource_class in resources:
        allocation = alloc_obj.Allocation(resource_provider=resource_provider,
                                          consumer=consumer,
                                          resource_class=resource_class,
                                          used=resources[resource_class])
        allocations.append(allocation)
    return allocations
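
A similar hedged usage sketch for _new_allocations() (context, resource_provider and consumer are assumed to exist as in the tests above):

# Build Allocation objects from a plain dict of resource classes to
# amounts, then write them in one transaction.
resources = {orc.VCPU: 2, orc.DISK_GB: 20}
allocations = _new_allocations(context, resource_provider, consumer, resources)
alloc_obj.replace_all(context, allocations)
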
Example #5
    def test_delete_consumer_if_no_allocs(self):
        """alloc_obj.replace_all() should attempt to delete consumers that
        no longer have any allocations. Due to the REST API not having any way
        to query for consumers directly (only via the GET
        /allocations/{consumer_uuid} endpoint which returns an empty dict even
        when no consumer record exists for the {consumer_uuid}) we need to do
        this functional test using only the object layer.
        """
        # We will use two consumers in this test, only one of which will get
        # all of its allocations deleted in a transaction (and we expect that
        # consumer record to be deleted)
        c1 = consumer_obj.Consumer(self.ctx,
                                   uuid=uuids.consumer1,
                                   user=self.user_obj,
                                   project=self.project_obj)
        c1.create()
        c2 = consumer_obj.Consumer(self.ctx,
                                   uuid=uuids.consumer2,
                                   user=self.user_obj,
                                   project=self.project_obj)
        c2.create()

        # Create some inventory that we will allocate
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, orc.VCPU, 8)
        tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
        tb.add_inventory(cn1, orc.DISK_GB, 2000)

        # Now allocate some of that inventory to two different consumers
        allocs = [
            alloc_obj.Allocation(consumer=c1,
                                 resource_provider=cn1,
                                 resource_class=orc.VCPU,
                                 used=1),
            alloc_obj.Allocation(consumer=c1,
                                 resource_provider=cn1,
                                 resource_class=orc.MEMORY_MB,
                                 used=512),
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.VCPU,
                                 used=1),
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.MEMORY_MB,
                                 used=512),
        ]
        alloc_obj.replace_all(self.ctx, allocs)

        # Validate that we have consumer records for both consumers
        for c_uuid in (uuids.consumer1, uuids.consumer2):
            c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
            self.assertIsNotNone(c_obj)

        # OK, now "remove" the allocation for consumer2 by setting the used
        # value for both allocated resources to 0 and re-running the
        # alloc_obj.replace_all(). This should end up deleting the
        # consumer record for consumer2
        allocs = [
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.VCPU,
                                 used=0),
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.MEMORY_MB,
                                 used=0),
        ]
        alloc_obj.replace_all(self.ctx, allocs)

        # consumer1 should still exist...
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
        self.assertIsNotNone(c_obj)

        # but not consumer2...
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer2)

        # DELETE /allocations/{consumer_uuid} is the other place where we
        # delete all allocations for a consumer. Let's delete all for consumer1
        # and check that the consumer record is deleted
        alloc_list = alloc_obj.get_all_by_consumer_id(self.ctx,
                                                      uuids.consumer1)
        alloc_obj.delete_all(self.ctx, alloc_list)

        # consumer1 should no longer exist in the DB since we just deleted all
        # of its allocations
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer1)

    def test_multi_provider_allocation(self):
        """Tests that an allocation that includes more than one resource
        provider can be created, listed and deleted properly.

        Bug #1707669 highlighted a situation that arose when attempting to
        remove part of an allocation for a source host during a resize
        operation where the existing allocation was not being properly
        deleted.
        """
        cn_source = self._create_provider('cn_source')
        cn_dest = self._create_provider('cn_dest')

        # Add same inventory to both source and destination host
        for cn in (cn_source, cn_dest):
            tb.add_inventory(cn, orc.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(cn, orc.MEMORY_MB, 1024,
                             min_unit=64,
                             max_unit=1024,
                             step_size=64,
                             allocation_ratio=1.5)

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        # Now create an allocation that represents a move operation where the
        # scheduler has selected cn_dest as the target host and created a
        # "doubled-up" allocation for the duration of the move operation
        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_source,
                resource_class=orc.VCPU,
                used=1),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_source,
                resource_class=orc.MEMORY_MB,
                used=256),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.VCPU,
                used=1),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.MEMORY_MB,
                used=256),
        ]
        alloc_obj.replace_all(self.ctx, alloc_list)

        src_allocs = alloc_obj.get_all_by_resource_provider(
            self.ctx, cn_source)

        self.assertEqual(2, len(src_allocs))

        dest_allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn_dest)

        self.assertEqual(2, len(dest_allocs))

        consumer_allocs = alloc_obj.get_all_by_consumer_id(
            self.ctx, uuidsentinel.instance)

        self.assertEqual(4, len(consumer_allocs))

        # Validate that when we create an allocation for a consumer, we
        # delete any existing allocations and replace them with the new ones.
        # Here, we're emulating the step that occurs on confirm_resize() where
        # the source host pulls the existing allocation for the instance and
        # removes any resources that refer to itself and saves the allocation
        # back to placement
        new_alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.VCPU,
                used=1),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.MEMORY_MB,
                used=256),
        ]
        alloc_obj.replace_all(self.ctx, new_alloc_list)

        src_allocs = alloc_obj.get_all_by_resource_provider(
            self.ctx, cn_source)

        self.assertEqual(0, len(src_allocs))

        dest_allocs = alloc_obj.get_all_by_resource_provider(
            self.ctx, cn_dest)

        self.assertEqual(2, len(dest_allocs))

        consumer_allocs = alloc_obj.get_all_by_consumer_id(
            self.ctx, uuidsentinel.instance)

        self.assertEqual(2, len(consumer_allocs))
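
The confirm_resize() step emulated above can be distilled into a small helper. This is a sketch, not part of the placement API: drop_source_allocations is a hypothetical name, and it relies only on get_all_by_consumer_id() and replace_all() as used in the test.

def drop_source_allocations(ctx, consumer_uuid, source_rp_uuid):
    # Fetch the consumer's current allocations, keep only those that
    # do not reference the source provider, and write the filtered
    # list back; replace_all() drops everything else for the consumer.
    current = alloc_obj.get_all_by_consumer_id(ctx, consumer_uuid)
    kept = [alloc for alloc in current
            if alloc.resource_provider.uuid != source_rp_uuid]
    alloc_obj.replace_all(ctx, kept)
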
    # mock_log implies the allocation module's logger is patched in;
    # restoring the presumed decorator here.
    @mock.patch('placement.objects.allocation.LOG')
    def test_set_allocations_retry(self, mock_log):
        """Test server side allocation write retry handling."""

        # Create a single resource provider and give it some inventory.
        rp1 = self._create_provider('rp1')
        tb.add_inventory(rp1, orc.VCPU, 24,
                         allocation_ratio=16.0)
        tb.add_inventory(rp1, orc.MEMORY_MB, 1024,
                         min_unit=64,
                         max_unit=1024,
                         step_size=64)
        original_generation = rp1.generation
        # Verify the generation is what we expect (we'll be checking again
        # later).
        self.assertEqual(2, original_generation)

        # Create a consumer and have it make an allocation.
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=rp1,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=rp1,
                resource_class=orc.MEMORY_MB,
                used=1024)
        ]

        # Make sure the right exception happens when the retry loop expires.
        self.conf_fixture.config(allocation_conflict_retry_count=0,
                                 group='placement')
        self.assertRaises(
            exception.ResourceProviderConcurrentUpdateDetected,
            alloc_obj.replace_all, self.ctx, alloc_list)
        mock_log.warning.assert_called_with(
            'Exceeded retry limit of %d on allocations write', 0)

        # Make sure the right thing happens after a small number of failures.
        # There's a bit of mock magic going on here to ensure that we can
        # both do some side effects on _set_allocations as well as have the
        # real behavior. Two generation conflicts and then a success.
        mock_log.reset_mock()
        self.conf_fixture.config(allocation_conflict_retry_count=3,
                                 group='placement')
        unmocked_set = alloc_obj._set_allocations
        with mock.patch('placement.objects.allocation.'
                        '_set_allocations') as mock_set:
            exceptions = iter([
                exception.ResourceProviderConcurrentUpdateDetected(),
                exception.ResourceProviderConcurrentUpdateDetected(),
            ])

            def side_effect(*args, **kwargs):
                try:
                    raise next(exceptions)
                except StopIteration:
                    return unmocked_set(*args, **kwargs)

            mock_set.side_effect = side_effect
            alloc_obj.replace_all(self.ctx, alloc_list)
            self.assertEqual(2, mock_log.debug.call_count)
            mock_log.debug.assert_called_with(
                'Retrying allocations write on resource provider '
                'generation conflict')
            self.assertEqual(3, mock_set.call_count)

        # Confirm we're using a different rp object after the change
        # and that it has a higher generation.
        new_rp = alloc_list[0].resource_provider
        self.assertEqual(original_generation, rp1.generation)
        self.assertEqual(original_generation + 1, new_rp.generation)
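
For reference, the server-side behavior this test exercises amounts to a bounded retry loop around _set_allocations(). A rough sketch, assuming the real loop lives inside alloc_obj.replace_all() (illustrative, not the actual implementation):

def _replace_all_with_retry(ctx, allocs, retry_count):
    # One initial attempt plus up to retry_count retries on generation
    # conflicts, mirroring the allocation_conflict_retry_count option.
    for attempt in range(retry_count + 1):
        try:
            return alloc_obj._set_allocations(ctx, allocs)
        except exception.ResourceProviderConcurrentUpdateDetected:
            if attempt == retry_count:
                raise
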
    def test_create_exceeding_capacity_allocation(self):
        """Tests on a list of allocations which contains an invalid allocation
        exceeds resource provider's capacity.

        Expect InvalidAllocationCapacityExceeded to be raised and all
        allocations in the list should not be applied.

        """
        empty_rp = self._create_provider('empty_rp')
        full_rp = self._create_provider('full_rp')

        for rp in (empty_rp, full_rp):
            tb.add_inventory(rp, orc.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(rp, orc.MEMORY_MB, 1024,
                             min_unit=64,
                             max_unit=1024,
                             step_size=64)

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        # First create an allocation to consume full_rp's resources.
        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=full_rp,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=full_rp,
                resource_class=orc.MEMORY_MB,
                used=1024)
        ]
        alloc_obj.replace_all(self.ctx, alloc_list)

        # Create a consumer representing the second instance
        inst2_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance2, user=self.user_obj,
            project=self.project_obj)
        inst2_consumer.create()

        # Create an allocation list consisting of valid requests and an invalid
        # request exceeding the memory full_rp can provide.
        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=empty_rp,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=empty_rp,
                resource_class=orc.MEMORY_MB,
                used=512),
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=full_rp,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=full_rp,
                resource_class=orc.MEMORY_MB,
                used=512),
        ]

        self.assertRaises(exception.InvalidAllocationCapacityExceeded,
                          alloc_obj.replace_all, self.ctx, alloc_list)

        # Make sure that allocations of both empty_rp and full_rp remain
        # unchanged.
        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, full_rp)
        self.assertEqual(2, len(allocations))

        allocations = alloc_obj.get_all_by_resource_provider(
            self.ctx, empty_rp)
        self.assertEqual(0, len(allocations))
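
The failure above is atomic from the caller's perspective; a short sketch of that contract (ctx and alloc_list as in the test):

try:
    alloc_obj.replace_all(ctx, alloc_list)
except exception.InvalidAllocationCapacityExceeded:
    # Nothing was written: usage on every provider named in the list
    # is exactly what it was before the call.
    pass
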
    def test_create_and_clear(self):
        """Test that a used of 0 in an allocation wipes allocations."""
        consumer_uuid = uuidsentinel.consumer

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        rp_class = orc.DISK_GB
        target_rp = self._make_rp_and_inventory(resource_class=rp_class,
                                                max_unit=500)

        # Create two allocations with values and confirm the resulting
        # usage is as expected.
        allocation1 = alloc_obj.Allocation(
            resource_provider=target_rp, consumer=inst_consumer,
            resource_class=rp_class, used=100)
        allocation2 = alloc_obj.Allocation(
            resource_provider=target_rp, consumer=inst_consumer,
            resource_class=rp_class, used=200)
        allocation_list = [allocation1, allocation2]
        alloc_obj.replace_all(self.ctx, allocation_list)

        allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
        self.assertEqual(2, len(allocations))
        usage = sum(alloc.used for alloc in allocations)
        self.assertEqual(300, usage)

        # Create two allocations, one with used=0, to confirm that the
        # resulting usage comes only from the non-zero allocation.
        allocation1 = alloc_obj.Allocation(
            resource_provider=target_rp, consumer=inst_consumer,
            resource_class=rp_class, used=0)
        allocation2 = alloc_obj.Allocation(
            resource_provider=target_rp, consumer=inst_consumer,
            resource_class=rp_class, used=200)
        allocation_list = [allocation1, allocation2]
        alloc_obj.replace_all(self.ctx, allocation_list)

        allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
        self.assertEqual(1, len(allocations))
        usage = allocations[0].used
        self.assertEqual(200, usage)

        # add a source rp and a migration consumer
        migration_uuid = uuidsentinel.migration

        # Create a consumer representing the migration
        mig_consumer = consumer_obj.Consumer(
            self.ctx, uuid=migration_uuid, user=self.user_obj,
            project=self.project_obj)
        mig_consumer.create()

        source_rp = self._make_rp_and_inventory(
            rp_name=uuidsentinel.source_name, rp_uuid=uuidsentinel.source_uuid,
            resource_class=rp_class, max_unit=500)

        # Create two allocations, one as the consumer, one as the
        # migration.
        allocation1 = alloc_obj.Allocation(
            resource_provider=target_rp, consumer=inst_consumer,
            resource_class=rp_class, used=200)
        allocation2 = alloc_obj.Allocation(
            resource_provider=source_rp, consumer=mig_consumer,
            resource_class=rp_class, used=200)
        allocation_list = [allocation1, allocation2]
        alloc_obj.replace_all(self.ctx, allocation_list)

        # Check primary consumer allocations.
        allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
        self.assertEqual(1, len(allocations))
        usage = allocations[0].used
        self.assertEqual(200, usage)

        # Check migration allocations.
        allocations = alloc_obj.get_all_by_consumer_id(
            self.ctx, migration_uuid)
        self.assertEqual(1, len(allocations))
        usage = allocations[0].used
        self.assertEqual(200, usage)

        # Clear the migration and confirm the target.
        allocation1 = alloc_obj.Allocation(
            resource_provider=target_rp, consumer=inst_consumer,
            resource_class=rp_class, used=200)
        allocation2 = alloc_obj.Allocation(
            resource_provider=source_rp, consumer=mig_consumer,
            resource_class=rp_class, used=0)
        allocation_list = [allocation1, allocation2]
        alloc_obj.replace_all(self.ctx, allocation_list)

        allocations = alloc_obj.get_all_by_consumer_id(self.ctx, consumer_uuid)
        self.assertEqual(1, len(allocations))
        usage = allocations[0].used
        self.assertEqual(200, usage)

        allocations = alloc_obj.get_all_by_consumer_id(
            self.ctx, migration_uuid)
        self.assertEqual(0, len(allocations))
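
Per this test and test_delete_consumer_if_no_allocs above, there are two equivalent ways to clear a consumer's allocations; a sketch assuming ctx and consumer_uuid exist:

# Option 1: write the same allocations back with used=0.
allocs = alloc_obj.get_all_by_consumer_id(ctx, consumer_uuid)
for alloc in allocs:
    alloc.used = 0
alloc_obj.replace_all(ctx, allocs)

# Option 2: delete them outright.
allocs = alloc_obj.get_all_by_consumer_id(ctx, consumer_uuid)
alloc_obj.delete_all(ctx, allocs)
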
    def test_allocation_list_create(self):
        max_unit = 10
        consumer_uuid = uuidsentinel.consumer

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        # Create two resource providers
        rp1_name = uuidsentinel.rp1_name
        rp1_uuid = uuidsentinel.rp1_uuid
        rp1_class = orc.DISK_GB
        rp1_used = 6

        rp2_name = uuidsentinel.rp2_name
        rp2_uuid = uuidsentinel.rp2_uuid
        rp2_class = orc.IPV4_ADDRESS
        rp2_used = 2

        rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
        rp2 = self._create_provider(rp2_name, uuid=rp2_uuid)

        # Two allocations, one for each resource provider.
        allocation_1 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=inst_consumer,
            resource_class=rp1_class, used=rp1_used)
        allocation_2 = alloc_obj.Allocation(
            resource_provider=rp2, consumer=inst_consumer,
            resource_class=rp2_class, used=rp2_used)
        allocation_list = [allocation_1, allocation_2]

        # There's no inventory, we have a failure.
        error = self.assertRaises(exception.InvalidInventory,
                                  alloc_obj.replace_all, self.ctx,
                                  allocation_list)
        # Confirm that the resource class string, not index, is in
        # the exception and resource providers are listed by uuid.
        self.assertIn(rp1_class, str(error))
        self.assertIn(rp2_class, str(error))
        self.assertIn(rp1.uuid, str(error))
        self.assertIn(rp2.uuid, str(error))

        # Add inventory for one of the two resource providers. This should also
        # fail, since rp2 has no inventory.
        tb.add_inventory(rp1, rp1_class, 1024, max_unit=1)
        self.assertRaises(exception.InvalidInventory,
                          alloc_obj.replace_all, self.ctx, allocation_list)

        # Add inventory for the second resource provider
        tb.add_inventory(rp2, rp2_class, 255, reserved=2, max_unit=1)

        # Now the allocations will still fail because max_unit is 1.
        self.assertRaises(exception.InvalidAllocationConstraintsViolated,
                          alloc_obj.replace_all, self.ctx, allocation_list)
        inv1 = inv_obj.Inventory(resource_provider=rp1,
                                 resource_class=rp1_class,
                                 total=1024, max_unit=max_unit)
        rp1.set_inventory([inv1])
        inv2 = inv_obj.Inventory(resource_provider=rp2,
                                 resource_class=rp2_class,
                                 total=255, reserved=2, max_unit=max_unit)
        rp2.set_inventory([inv2])

        # Now we can finally allocate.
        alloc_obj.replace_all(self.ctx, allocation_list)

        # Check that those allocations changed usage on each
        # resource provider.
        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp2_uuid)
        self.assertEqual(rp1_used, rp1_usage[0].usage)
        self.assertEqual(rp2_used, rp2_usage[0].usage)

        # redo one allocation
        # TODO(cdent): This does not currently behave as expected
        # because a new allocation is created, adding to the total
        # used, not replacing.
        rp1_used += 1
        self.allocate_from_provider(
            rp1, rp1_class, rp1_used, consumer=inst_consumer)

        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        self.assertEqual(rp1_used, rp1_usage[0].usage)

        # delete the allocations for the consumer
        # NOTE(cdent): The database uses 'consumer_id' for the
        # column, presumably because some ids might not be uuids, at
        # some point in the future.
        consumer_allocations = alloc_obj.get_all_by_consumer_id(
            self.ctx, consumer_uuid)
        alloc_obj.delete_all(self.ctx, consumer_allocations)

        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp2_uuid)
        self.assertEqual(0, rp1_usage[0].usage)
        self.assertEqual(0, rp2_usage[0].usage)
Example #11
    def test_reshape(self):
        """We set up the following scenario:

        BEFORE: single compute node setup

          A single compute node with:
            - VCPU, MEMORY_MB, DISK_GB inventory
            - Two instances consuming CPU, RAM and DISK from that compute node

        AFTER: hierarchical + shared storage setup

          A compute node parent provider with:
            - MEMORY_MB
          Two NUMA node child providers containing:
            - VCPU
          Shared storage provider with:
            - DISK_GB
          Both instances have their resources split among the providers and
          shared storage accordingly
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()

        i2_uuid = uuids.instance2
        i2_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i2_uuid, user=self.user_obj,
            project=self.project_obj)
        i2_consumer.create()

        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate both instances against the single compute node
        for consumer in (i1_consumer, i2_consumer):
            allocs = [
                alloc_obj.Allocation(
                    resource_provider=cn1,
                    resource_class='VCPU', consumer=consumer, used=2),
                alloc_obj.Allocation(
                    resource_provider=cn1,
                    resource_class='MEMORY_MB', consumer=consumer, used=1024),
                alloc_obj.Allocation(
                    resource_provider=cn1,
                    resource_class='DISK_GB', consumer=consumer, used=100),
            ]
            alloc_obj.replace_all(self.ctx, allocs)

        # Verify we have the allocations we expect for the BEFORE scenario
        before_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
        self.assertEqual(3, len(before_allocs_i1))
        self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
        before_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
        self.assertEqual(3, len(before_allocs_i2))
        self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1: [
                inv_obj.Inventory(
                    resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # each NUMA node gets half of the CPUs
            cn1_numa0: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            cn1_numa1: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # The sharing provider gets a bunch of disk
            ss: [
                inv_obj.Inventory(
                    resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)
        after_allocs = [
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            alloc_obj.Allocation(
                resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
            # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            alloc_obj.Allocation(
                resource_provider=cn1_numa1, resource_class='VCPU',
                consumer=i2_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i2_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=ss, resource_class='DISK_GB',
                consumer=i2_consumer, used=100),
        ]
        reshaper.reshape(self.ctx, after_inventories, after_allocs)

        # Verify that the inventories have been moved to the appropriate
        # providers in the AFTER scenario

        # The root compute node should only have MEMORY_MB, nothing else
        cn1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1)
        self.assertEqual(1, len(cn1_inv))
        self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
        self.assertEqual(32768, cn1_inv[0].total)
        # Each NUMA node should only have half the original VCPU, nothing else
        numa0_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa0)
        self.assertEqual(1, len(numa0_inv))
        self.assertEqual('VCPU', numa0_inv[0].resource_class)
        self.assertEqual(8, numa0_inv[0].total)
        numa1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa1)
        self.assertEqual(1, len(numa1_inv))
        self.assertEqual('VCPU', numa1_inv[0].resource_class)
        self.assertEqual(8, numa1_inv[0].total)
        # The sharing storage provider should only have DISK_GB, nothing else
        ss_inv = inv_obj.get_all_by_resource_provider(self.ctx, ss)
        self.assertEqual(1, len(ss_inv))
        self.assertEqual('DISK_GB', ss_inv[0].resource_class)
        self.assertEqual(100000, ss_inv[0].total)

        # Verify we have the allocations we expect for the AFTER scenario
        after_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
        self.assertEqual(3, len(after_allocs_i1))
        # Our VCPU allocation should be in the NUMA0 node
        vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

        after_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
        self.assertEqual(3, len(after_allocs_i2))
        # Our VCPU allocation should be in the NUMA1 node
        vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
Example #12
    def test_reshape_concurrent_inventory_update(self):
        """Valid failure scenario for reshape(). We test a situation where the
        virt driver has constructed its "after" inventories and allocations
        and sent those to the POST /reshaper endpoint. The reshape POST handler
        does a quick check of the resource provider generations sent in the
        payload and they all check out.

        However, right before the call to resource_provider.reshape(), another
        thread legitimately changes the inventory of one of the providers
        involved in the reshape transaction. We should get a
        ConcurrentUpdateDetected in this case.
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()

        # then all our original providers
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate an instance on our compute node
        allocs = [
            alloc_obj.Allocation(
                resource_provider=cn1,
                resource_class='VCPU', consumer=i1_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1,
                resource_class='MEMORY_MB', consumer=i1_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=cn1,
                resource_class='DISK_GB', consumer=i1_consumer, used=100),
        ]
        alloc_obj.replace_all(self.ctx, allocs)

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1: [
                inv_obj.Inventory(
                    resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # each NUMA node gets half of the CPUs
            cn1_numa0: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            cn1_numa1: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # The sharing provider gets a bunch of disk
            ss: [
                inv_obj.Inventory(
                    resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
        }
        # We do a fetch from the DB for the instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumer representing the instance above will have had its
        # generation incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        after_allocs = [
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            alloc_obj.Allocation(
                resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
        ]

        # OK, now before we call reshape(), here we emulate another thread
        # changing the inventory for the sharing storage provider in between
        # the time in the REST handler when the sharing storage provider's
        # generation was validated and the actual call to reshape()
        ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
        # Reduce the amount of storage to 2000, from 100000.
        new_ss_inv = [
            inv_obj.Inventory(
                resource_provider=ss_threadB, resource_class='DISK_GB',
                total=2000, reserved=0, max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0)]
        ss_threadB.set_inventory(new_ss_inv)
        # Double check our storage provider's generation is now greater than
        # the original storage provider record being sent to reshape()
        self.assertGreater(ss_threadB.generation, ss.generation)

        # And we should legitimately get a failure now from reshape() due to
        # another thread updating one of the involved providers' generations
        self.assertRaises(
            exception.ConcurrentUpdateDetected,
            reshaper.reshape, self.ctx, after_inventories, after_allocs)
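
When reshape() fails this way, the caller is expected to refetch the providers (picking up their new generations), rebuild the payload, and try again. A hypothetical sketch; reshape_with_retry and build_payload are illustrative names, not placement API:

def reshape_with_retry(ctx, build_payload, attempts=3):
    for attempt in range(attempts):
        # build_payload() should refetch providers from the DB so the
        # inventories carry fresh generations.
        after_inventories, after_allocs = build_payload(ctx)
        try:
            return reshaper.reshape(ctx, after_inventories, after_allocs)
        except exception.ConcurrentUpdateDetected:
            if attempt == attempts - 1:
                raise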