Example no. 1
0
    def test_delete_all_with_multiple_consumers(self):
        """Regression test for LP #1781430.

        alloc_obj.delete_all(), when handed the allocation list returned by
        alloc_obj.get_all_by_resource_provider() for a provider consumed by
        more than one consumer, used to leave the DB in an inconsistent
        state. Verify the delete actually empties the provider.
        """
        # One provider with VCPU inventory, consumed by two instances.
        provider = self._create_provider('cn1')
        tb.add_inventory(provider, 'VCPU', 8)

        consumer_uuids = (uuidsentinel.consumer1, uuidsentinel.consumer2)
        for consumer_uuid in consumer_uuids:
            self.allocate_from_provider(
                provider, 'VCPU', 1, consumer_id=consumer_uuid)

        # Both consumers' allocations are visible on the provider.
        found = alloc_obj.get_all_by_resource_provider(self.ctx, provider)
        self.assertEqual(2, len(found))

        # Deleting the whole list must leave no allocations behind.
        alloc_obj.delete_all(self.ctx, found)
        found = alloc_obj.get_all_by_resource_provider(self.ctx, provider)
        self.assertEqual(0, len(found))
Example no. 2
0
def delete_allocations(req):
    """Delete every allocation held by a single consumer.

    Responds 204 on success; 404 when the consumer has no allocations or
    when they vanished concurrently during the delete.
    """
    context = req.environ['placement.context']
    context.can(policies.ALLOC_DELETE)
    consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')

    allocations = alloc_obj.get_all_by_consumer_id(context, consumer_uuid)
    # Guard clause: nothing to delete is a 404, not a silent no-op.
    if not allocations:
        raise webob.exc.HTTPNotFound(
            "No allocations for consumer '%(consumer_uuid)s'" %
            {'consumer_uuid': consumer_uuid})

    try:
        alloc_obj.delete_all(context, allocations)
    # NOTE(pumaranikar): delete_all() raises NotFound when some other
    # activity has already removed an allocation from the list we fetched
    # above; surface that race as a 404 as well.
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            "Allocation for consumer with id %(id)s not found. error: "
            "%(error)s" % {'id': consumer_uuid, 'error': exc})
    LOG.debug("Successfully deleted allocations %s", allocations)

    req.response.status = 204
    req.response.content_type = None
    return req.response
Example no. 3
0
    def test_create_list_and_delete_allocation(self):
        """Round-trip one allocation: create, list, delete, list again."""
        rp, _ = self._make_allocation(tb.DISK_INVENTORY, tb.DISK_ALLOCATION)

        # The freshly created allocation is the only one on the provider
        # and carries the expected 'used' value.
        found = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
        self.assertEqual(1, len(found))
        self.assertEqual(tb.DISK_ALLOCATION['used'], found[0].used)

        # After deletion the provider has no allocations at all.
        alloc_obj.delete_all(self.ctx, found)
        found = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
        self.assertEqual(0, len(found))
Example no. 4
0
    def test_delete_consumer_if_no_allocs(self):
        """alloc_obj.replace_all() should delete consumers left with no
        allocations.

        The REST API offers no way to query consumers directly (GET
        /allocations/{consumer_uuid} returns an empty dict even when no
        consumer record exists), so this is a functional test at the
        object layer only.
        """
        # Two consumers; only the second will have all of its allocations
        # zeroed out in one transaction, and we expect exactly that
        # consumer record to be removed.
        consumers = {}
        for c_uuid in (uuids.consumer1, uuids.consumer2):
            consumer = consumer_obj.Consumer(
                self.ctx, uuid=c_uuid, user=self.user_obj,
                project=self.project_obj)
            consumer.create()
            consumers[c_uuid] = consumer

        # Inventory to allocate from.
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, orc.VCPU, 8)
        tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
        tb.add_inventory(cn1, orc.DISK_GB, 2000)

        def _allocs_for(consumer, vcpu_used, ram_used):
            # Build the VCPU + MEMORY_MB allocation pair for one consumer.
            return [
                alloc_obj.Allocation(consumer=consumer,
                                     resource_provider=cn1,
                                     resource_class=orc.VCPU,
                                     used=vcpu_used),
                alloc_obj.Allocation(consumer=consumer,
                                     resource_provider=cn1,
                                     resource_class=orc.MEMORY_MB,
                                     used=ram_used),
            ]

        # Allocate some of that inventory to both consumers.
        alloc_obj.replace_all(
            self.ctx,
            _allocs_for(consumers[uuids.consumer1], 1, 512) +
            _allocs_for(consumers[uuids.consumer2], 1, 512))

        # Both consumer records must now exist.
        for c_uuid in (uuids.consumer1, uuids.consumer2):
            self.assertIsNotNone(
                consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid))

        # "Remove" consumer2's allocations by writing both back with
        # used=0 and re-running replace_all(). That should end up deleting
        # consumer2's record.
        alloc_obj.replace_all(
            self.ctx, _allocs_for(consumers[uuids.consumer2], 0, 0))

        # consumer1 still exists...
        self.assertIsNotNone(
            consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1))

        # ...but consumer2 is gone.
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer2)

        # DELETE /allocations/{consumer_uuid} is the other place where all
        # of a consumer's allocations are dropped; exercise that path for
        # consumer1 and check its consumer record disappears too.
        remaining = alloc_obj.get_all_by_consumer_id(self.ctx,
                                                     uuids.consumer1)
        alloc_obj.delete_all(self.ctx, remaining)

        # consumer1 should no longer exist in the DB since we just deleted
        # all of its allocations.
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer1)
Example no. 5
0
    def test_allocation_list_create(self):
        """End-to-end exercise of alloc_obj.replace_all() over two providers.

        Walks through the failure modes (no inventory, inventory on only
        one provider, max_unit violation), then a successful allocation,
        usage verification, re-allocation, and finally deletion by
        consumer id with usage dropping back to zero.
        """
        max_unit = 10
        consumer_uuid = uuidsentinel.consumer

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        # Create two resource providers
        rp1_name = uuidsentinel.rp1_name
        rp1_uuid = uuidsentinel.rp1_uuid
        rp1_class = orc.DISK_GB
        rp1_used = 6

        rp2_name = uuidsentinel.rp2_name
        rp2_uuid = uuidsentinel.rp2_uuid
        rp2_class = orc.IPV4_ADDRESS
        rp2_used = 2

        rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
        rp2 = self._create_provider(rp2_name, uuid=rp2_uuid)

        # Two allocations, one for each resource provider.
        allocation_1 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=inst_consumer,
            resource_class=rp1_class, used=rp1_used)
        allocation_2 = alloc_obj.Allocation(
            resource_provider=rp2, consumer=inst_consumer,
            resource_class=rp2_class, used=rp2_used)
        allocation_list = [allocation_1, allocation_2]

        # There's no inventory, we have a failure.
        error = self.assertRaises(exception.InvalidInventory,
                                  alloc_obj.replace_all, self.ctx,
                                  allocation_list)
        # Confirm that the resource class string, not index, is in
        # the exception and resource providers are listed by uuid.
        self.assertIn(rp1_class, str(error))
        self.assertIn(rp2_class, str(error))
        self.assertIn(rp1.uuid, str(error))
        self.assertIn(rp2.uuid, str(error))

        # Add inventory for one of the two resource providers. This should also
        # fail, since rp2 has no inventory.
        tb.add_inventory(rp1, rp1_class, 1024, max_unit=1)
        self.assertRaises(exception.InvalidInventory,
                          alloc_obj.replace_all, self.ctx, allocation_list)

        # Add inventory for the second resource provider
        tb.add_inventory(rp2, rp2_class, 255, reserved=2, max_unit=1)

        # Now the allocations will still fail because max_unit 1
        # (both requested amounts exceed a max_unit of 1).
        self.assertRaises(exception.InvalidAllocationConstraintsViolated,
                          alloc_obj.replace_all, self.ctx, allocation_list)
        # Replace both providers' inventories with ones whose max_unit is
        # large enough for the requested amounts.
        inv1 = inv_obj.Inventory(resource_provider=rp1,
                                 resource_class=rp1_class,
                                 total=1024, max_unit=max_unit)
        rp1.set_inventory([inv1])
        inv2 = inv_obj.Inventory(resource_provider=rp2,
                                 resource_class=rp2_class,
                                 total=255, reserved=2, max_unit=max_unit)
        rp2.set_inventory([inv2])

        # Now we can finally allocate.
        alloc_obj.replace_all(self.ctx, allocation_list)

        # Check that those allocations changed usage on each
        # resource provider.
        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp2_uuid)
        self.assertEqual(rp1_used, rp1_usage[0].usage)
        self.assertEqual(rp2_used, rp2_usage[0].usage)

        # redo one allocation
        # TODO(cdent): This does not currently behave as expected
        # because a new allocation is created, adding to the total
        # used, not replacing.
        rp1_used += 1
        self.allocate_from_provider(
            rp1, rp1_class, rp1_used, consumer=inst_consumer)

        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        self.assertEqual(rp1_used, rp1_usage[0].usage)

        # delete the allocations for the consumer
        # NOTE(cdent): The database uses 'consumer_id' for the
        # column, presumably because some ids might not be uuids, at
        # some point in the future.
        consumer_allocations = alloc_obj.get_all_by_consumer_id(
            self.ctx, consumer_uuid)
        alloc_obj.delete_all(self.ctx, consumer_allocations)

        # With all of the consumer's allocations gone, usage on both
        # providers must return to zero.
        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp2_uuid)
        self.assertEqual(0, rp1_usage[0].usage)
        self.assertEqual(0, rp2_usage[0].usage)
Example no. 6
0
    def test_nested_providers(self):
        """Build a three-level provider tree and verify tree queries plus
        deletion ordering: a provider with no direct allocations still
        cannot be destroyed while a descendant holds allocations.
        """
        root_rp = self._create_provider('root_rp')
        child_rp = self._create_provider('child_rp',
                                         parent=uuidsentinel.root_rp)
        grandchild_rp = self._create_provider('grandchild_rp',
                                              parent=uuidsentinel.child_rp)

        # Both descendants must report the top of the tree as their root.
        self.assertEqual(root_rp.uuid, child_rp.root_provider_uuid)
        self.assertEqual(root_rp.uuid, grandchild_rp.root_provider_uuid)

        # Give the grandchild some inventory; allocations against it come
        # later, after the filter checks below.
        tb.add_inventory(grandchild_rp, orc.VCPU, 1)

        def _providers(**filters):
            # Helper: fetch providers matching the given filters.
            return rp_obj.get_all_by_filters(self.ctx, filters=filters)

        # Querying in_tree by ANY member's UUID returns all three
        # providers.
        for tree_uuid in (uuidsentinel.root_rp, uuidsentinel.child_rp,
                          uuidsentinel.grandchild_rp):
            providers = _providers(in_tree=tree_uuid)
            self.assertEqual(3, len(providers))

        # The member_of and uuid filters must compose with in_tree.

        # No aggregate associations exist yet, so member_of yields no
        # records.
        providers = _providers(member_of=[[uuidsentinel.agg]],
                               in_tree=uuidsentinel.grandchild_rp)
        self.assertEqual(0, len(providers))

        # Associate the grandchild with an aggregate; now ONLY the
        # grandchild comes back for its tree plus that member_of.
        grandchild_rp.set_aggregates([uuidsentinel.agg])
        providers = _providers(member_of=[[uuidsentinel.agg]],
                               in_tree=uuidsentinel.grandchild_rp)
        self.assertEqual(1, len(providers))
        self.assertEqual(uuidsentinel.grandchild_rp, providers[0].uuid)

        # Filtering on an unknown UUID yields no results.
        providers = _providers(uuid=uuidsentinel.unknown_rp,
                               in_tree=uuidsentinel.grandchild_rp)
        self.assertEqual(0, len(providers))

        # Filtering on the child's UUID within the tree returns exactly
        # the child.
        providers = _providers(uuid=uuidsentinel.child_rp,
                               in_tree=uuidsentinel.grandchild_rp)
        self.assertEqual(1, len(providers))
        self.assertEqual(uuidsentinel.child_rp, providers[0].uuid)

        # The resources filter composes with in_tree too: no provider can
        # satisfy 200 VCPU.
        providers = _providers(in_tree=uuidsentinel.grandchild_rp,
                               resources={'VCPU': 200})
        self.assertEqual(0, len(providers))

        # A single VCPU is available only on the grandchild.
        providers = _providers(in_tree=uuidsentinel.grandchild_rp,
                               resources={'VCPU': 1})
        self.assertEqual(1, len(providers))
        self.assertEqual(uuidsentinel.grandchild_rp, providers[0].uuid)

        # Same result when naming the tree via the parent's UUID.
        providers = _providers(in_tree=uuidsentinel.child_rp,
                               resources={'VCPU': 1})
        self.assertEqual(1, len(providers))
        self.assertEqual(uuidsentinel.grandchild_rp, providers[0].uuid)

        # Allocate against the grandchild. While that allocation exists,
        # neither ancestor may be destroyed...
        alloc_list = self.allocate_from_provider(
            grandchild_rp, orc.VCPU, 1)

        self.assertRaises(exception.CannotDeleteParentResourceProvider,
                          root_rp.destroy)
        self.assertRaises(exception.CannotDeleteParentResourceProvider,
                          child_rp.destroy)

        # ...nor may the allocated provider itself.
        self.assertRaises(exception.ResourceProviderInUse,
                          grandchild_rp.destroy)

        # Dropping the allocations unblocks deletion, bottom-up.
        alloc_obj.delete_all(self.ctx, alloc_list)
        grandchild_rp.destroy()
        child_rp.destroy()
        root_rp.destroy()