    def test_delete_all_with_multiple_consumers(self):
        """Tests the fix for LP #1781430, where calling alloc_obj.delete_all()
        on the list of allocations returned by
        alloc_obj.get_all_by_resource_provider() for a resource provider that
        had multiple consumers allocated against it left the DB in an
        inconsistent state.
        """
        # Create a single resource provider and allocate resources for two
        # instances from it. Then grab all the provider's allocations with
        # alloc_obj.get_all_by_resource_provider() and attempt to delete
        # them all with alloc_obj.delete_all(). After which, another call
        # to alloc_obj.get_all_by_resource_provider() should return an
        # empty list.
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 8)

        c1_uuid = uuidsentinel.consumer1
        c2_uuid = uuidsentinel.consumer2

        for c_uuid in (c1_uuid, c2_uuid):
            self.allocate_from_provider(cn1, 'VCPU', 1, consumer_id=c_uuid)

        allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn1)
        self.assertEqual(2, len(allocs))

        alloc_obj.delete_all(self.ctx, allocs)

        allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn1)
        self.assertEqual(0, len(allocs))
Example #2
    def start_fixture(self):
        super(SharedStorageFixture, self).start_fixture()

        agg_uuid = uuidutils.generate_uuid()

        cn1 = tb.create_provider(self.context, 'cn1', agg_uuid)
        cn2 = tb.create_provider(self.context, 'cn2', agg_uuid)
        ss = tb.create_provider(self.context, 'ss', agg_uuid)
        ss2 = tb.create_provider(self.context, 'ss2', agg_uuid)

        numa1_1 = tb.create_provider(self.context, 'numa1_1', parent=cn1.uuid)
        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)

        pf1_1 = tb.create_provider(self.context, 'pf1_1', parent=numa1_1.uuid)
        pf1_2 = tb.create_provider(self.context, 'pf1_2', parent=numa1_2.uuid)
        pf2_1 = tb.create_provider(self.context, 'pf2_1', parent=numa2_1.uuid)
        pf2_2 = tb.create_provider(self.context, 'pf2_2', parent=numa2_2.uuid)

        os.environ['AGG_UUID'] = agg_uuid

        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid
        os.environ['SS_UUID'] = ss.uuid
        os.environ['SS2_UUID'] = ss2.uuid

        os.environ['NUMA1_1_UUID'] = numa1_1.uuid
        os.environ['NUMA1_2_UUID'] = numa1_2.uuid
        os.environ['NUMA2_1_UUID'] = numa2_1.uuid
        os.environ['NUMA2_2_UUID'] = numa2_2.uuid

        os.environ['PF1_1_UUID'] = pf1_1.uuid
        os.environ['PF1_2_UUID'] = pf1_2.uuid
        os.environ['PF2_1_UUID'] = pf2_1.uuid
        os.environ['PF2_2_UUID'] = pf2_2.uuid

        # Populate compute node inventory for VCPU and RAM
        for cn in (cn1, cn2):
            tb.add_inventory(cn, orc.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(cn, orc.MEMORY_MB, 128 * 1024,
                             allocation_ratio=1.5)

        tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')
        tb.add_inventory(cn2, orc.DISK_GB, 2000,
                         reserved=100, allocation_ratio=1.0)

        for shared in (ss, ss2):
            # Populate shared storage provider with DISK_GB inventory and
            # mark it shared among any provider associated via aggregate
            tb.add_inventory(shared, orc.DISK_GB, 2000,
                             reserved=100, allocation_ratio=1.0)
            tb.set_traits(shared, 'MISC_SHARES_VIA_AGGREGATE')

        # Populate PF inventory for VF
        for pf in (pf1_1, pf1_2, pf2_1, pf2_2):
            tb.add_inventory(pf, orc.SRIOV_NET_VF,
                             8, allocation_ratio=1.0)
    def _make_rp_and_inventory(self, **kwargs):
        # Create one resource provider and set some inventory
        rp_name = kwargs.get('rp_name') or uuidsentinel.rp_name
        rp_uuid = kwargs.get('rp_uuid') or uuidsentinel.rp_uuid
        rp = self._create_provider(rp_name, uuid=rp_uuid)
        rc = kwargs.pop('resource_class')
        tb.add_inventory(rp, rc, 1024, **kwargs)
        return rp
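    # A hedged usage sketch for the helper above (illustrative only; the
    # keyword arguments shown are assumptions based on how the helper reads
    # its kwargs, not a call copied from the original test module):
    #
    #     rp = self._make_rp_and_inventory(resource_class=orc.DISK_GB,
    #                                      max_unit=256, step_size=64)
    #
    # 'resource_class' is required (it is pop()'d before the remaining
    # kwargs, e.g. max_unit and step_size, are forwarded to
    # tb.add_inventory()).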
Example #4
    def test_get_all_multiple_inv(self):
        db_rp = self._create_provider('rp_no_inv')
        tb.add_inventory(db_rp, orc.DISK_GB, 1024)
        tb.add_inventory(db_rp, orc.VCPU, 24)

        usages = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, db_rp.uuid)
        self.assertEqual(2, len(usages))
    def test_add_allocation_increments_generation(self):
        rp = self._create_provider(name='foo')
        tb.add_inventory(rp, tb.DISK_INVENTORY['resource_class'],
                         tb.DISK_INVENTORY['total'])
        expected_gen = rp.generation + 1
        self.allocate_from_provider(rp, tb.DISK_ALLOCATION['resource_class'],
                                    tb.DISK_ALLOCATION['used'])
        self.assertEqual(expected_gen, rp.generation)
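    # The generation bump asserted above is placement's optimistic-concurrency
    # token: successful inventory and allocation writes against a provider
    # increment its generation, and a writer holding a stale generation gets a
    # conflict (see test_set_allocations_retry further down). A minimal sketch
    # of re-reading the provider to pick up the new value, assuming the
    # resource provider object layer is imported as rp_obj (an assumption; the
    # import is not shown in these snippets):
    #
    #     refreshed = rp_obj.ResourceProvider.get_by_uuid(self.ctx, rp.uuid)
    #     self.assertEqual(expected_gen, refreshed.generation)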
    def test_allocation_checking(self):
        """Test that allocation check logic works with 2 resource classes on
        one provider.

        If this fails, we get a KeyError at replace_all()
        """

        max_unit = 10
        consumer_uuid = uuidsentinel.consumer
        consumer_uuid2 = uuidsentinel.consumer2

        # Create consumers representing the two instances
        consumer = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid, user=self.user_obj,
            project=self.project_obj)
        consumer.create()
        consumer2 = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid2, user=self.user_obj,
            project=self.project_obj)
        consumer2.create()

        # Create one resource provider with 2 classes
        rp1_name = uuidsentinel.rp1_name
        rp1_uuid = uuidsentinel.rp1_uuid
        rp1_class = orc.DISK_GB
        rp1_used = 6

        rp2_class = orc.IPV4_ADDRESS
        rp2_used = 2

        rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
        tb.add_inventory(rp1, rp1_class, 1024, max_unit=max_unit)
        tb.add_inventory(rp1, rp2_class, 255, reserved=2, max_unit=max_unit)

        # create the allocations for a first consumer
        allocation_1 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer, resource_class=rp1_class,
            used=rp1_used)
        allocation_2 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer, resource_class=rp2_class,
            used=rp2_used)
        allocation_list = [allocation_1, allocation_2]
        alloc_obj.replace_all(self.ctx, allocation_list)

        # Create the allocations for a second consumer; until we have
        # allocations for more than one consumer in the db, we won't
        # actually be doing real allocation math, which is what triggers
        # the sql monster.
        allocation_1 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer2,
            resource_class=rp1_class, used=rp1_used)
        allocation_2 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=consumer2,
            resource_class=rp2_class, used=rp2_used)
        allocation_list = [allocation_1, allocation_2]
        # If we are joining wrong, this will be a KeyError
        alloc_obj.replace_all(self.ctx, allocation_list)
Example #7
    def test_get_inventory_no_allocation(self):
        db_rp = self._create_provider('rp_no_inv')
        tb.add_inventory(db_rp, orc.DISK_GB, 1024)

        usages = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, db_rp.uuid)
        self.assertEqual(1, len(usages))
        self.assertEqual(0, usages[0].usage)
        self.assertEqual(orc.DISK_GB, usages[0].resource_class)
Example #8
    def start_fixture(self):
        super(AllocationFixture, self).start_fixture()

        # For use in creating and querying allocations/usages
        os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
        project_id = os.environ['PROJECT_ID']
        user_id = os.environ['USER_ID']
        alt_user_id = os.environ['ALT_USER_ID']

        user = user_obj.User(self.context, uuid=user_id)
        user.create()
        alt_user = user_obj.User(self.context, uuid=alt_user_id)
        alt_user.create()
        project = project_obj.Project(self.context, uuid=project_id)
        project.create()

        # Stealing from the super
        rp_name = os.environ['RP_NAME']
        rp_uuid = os.environ['RP_UUID']
        # Create the rp with VCPU and DISK_GB inventory
        rp = tb.create_provider(self.context, rp_name, uuid=rp_uuid)
        tb.add_inventory(rp,
                         'DISK_GB',
                         2048,
                         step_size=10,
                         min_unit=10,
                         max_unit=1000)
        tb.add_inventory(rp, 'VCPU', 10, max_unit=10)

        # Create a first consumer for the DISK_GB allocations

        consumer1 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer1, {'DISK_GB': 1000})
        os.environ['CONSUMER_0'] = consumer1.uuid

        # Create a second consumer for the VCPU allocations
        consumer2 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer2, {'VCPU': 6})
        os.environ['CONSUMER_ID'] = consumer2.uuid

        # Create a consumer object for a different user
        alt_consumer = tb.ensure_consumer(self.context, alt_user, project)
        os.environ['ALT_CONSUMER_ID'] = alt_consumer.uuid

        # Create a couple of allocations for a different user.
        tb.set_allocation(self.context, rp, alt_consumer, {
            'DISK_GB': 20,
            'VCPU': 1
        })

        # The ALT_RP_XXX variables are for a resource provider that has
        # not been created in the Allocation fixture
        os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
        os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
    def test_destroy_resource_provider_destroy_inventory(self):
        resource_provider = self._create_provider(
            uuidsentinel.fake_resource_name,
            uuid=uuidsentinel.fake_resource_provider,
        )
        tb.add_inventory(resource_provider,
                         tb.DISK_INVENTORY['resource_class'],
                         tb.DISK_INVENTORY['total'])
        inventories = inv_obj.get_all_by_resource_provider(
            self.ctx, resource_provider)
        self.assertEqual(1, len(inventories))
        resource_provider.destroy()
        inventories = inv_obj.get_all_by_resource_provider(
            self.ctx, resource_provider)
        self.assertEqual(0, len(inventories))
    def test_get_all_by_resource_provider_multiple_providers(self):
        rp1 = self._create_provider('cn1')
        rp2 = self._create_provider(name='cn2')

        for rp in (rp1, rp2):
            tb.add_inventory(rp, tb.DISK_INVENTORY['resource_class'],
                             tb.DISK_INVENTORY['total'])
            tb.add_inventory(rp, orc.IPV4_ADDRESS, 10,
                             max_unit=2)

        # Get inventories for the first resource provider and validate
        # the inventory records have a matching resource provider
        got_inv = inv_obj.get_all_by_resource_provider(self.ctx, rp1)
        for inv in got_inv:
            self.assertEqual(rp1.id, inv.resource_provider.id)
Example #11
    def start_fixture(self):
        super(NUMAAggregateFixture, self).start_fixture()

        aggA_uuid = uuidutils.generate_uuid()
        aggB_uuid = uuidutils.generate_uuid()
        aggC_uuid = uuidutils.generate_uuid()

        cn1 = tb.create_provider(self.context, 'cn1', aggA_uuid)
        cn2 = tb.create_provider(self.context, 'cn2', aggA_uuid, aggB_uuid)
        ss1 = tb.create_provider(self.context, 'ss1', aggA_uuid)
        ss2 = tb.create_provider(self.context, 'ss2', aggC_uuid)

        numa1_1 = tb.create_provider(self.context,
                                     'numa1_1',
                                     aggC_uuid,
                                     parent=cn1.uuid)
        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)

        os.environ['AGGA_UUID'] = aggA_uuid
        os.environ['AGGB_UUID'] = aggB_uuid
        os.environ['AGGC_UUID'] = aggC_uuid

        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid
        os.environ['SS1_UUID'] = ss1.uuid
        os.environ['SS2_UUID'] = ss2.uuid

        os.environ['NUMA1_1_UUID'] = numa1_1.uuid
        os.environ['NUMA1_2_UUID'] = numa1_2.uuid
        os.environ['NUMA2_1_UUID'] = numa2_1.uuid
        os.environ['NUMA2_2_UUID'] = numa2_2.uuid

        # Populate each NUMA node provider with VCPU inventory
        for numa in (numa1_1, numa1_2, numa2_1, numa2_2):
            tb.add_inventory(numa, orc.VCPU, 24, allocation_ratio=16.0)

        # Populate shared storage provider with DISK_GB inventory and
        # mark it shared among any provider associated via aggregate
        for ss in (ss1, ss2):
            tb.add_inventory(ss,
                             orc.DISK_GB,
                             2000,
                             reserved=100,
                             allocation_ratio=1.0)
            tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')
            tb.set_sharing_among_agg(ss)
    def test_set_inventory_over_capacity(self, mock_log):
        rp = self._create_provider(uuidsentinel.rp_name)

        disk_inv = tb.add_inventory(rp, orc.DISK_GB, 2048,
                                    reserved=15,
                                    min_unit=10,
                                    max_unit=600,
                                    step_size=10)
        vcpu_inv = tb.add_inventory(rp, orc.VCPU, 12,
                                    allocation_ratio=16.0)

        self.assertFalse(mock_log.warning.called)

        # Allocate something reasonable for the above inventory
        self.allocate_from_provider(rp, 'DISK_GB', 500)

        # Update our inventory to over-subscribe us after the above allocation
        disk_inv.total = 400
        rp.set_inventory([disk_inv, vcpu_inv])

        # We should succeed, but have logged a warning for going over on disk
        mock_log.warning.assert_called_once_with(
            mock.ANY, {'uuid': rp.uuid, 'resource': 'DISK_GB'})
    def test_set_inventory_fail_in_use(self):
        """Test attempting to set inventory which would result in removing an
        inventory record for a resource class that still has allocations
        against it.
        """
        rp = self._create_provider('compute-host')
        tb.add_inventory(rp, 'VCPU', 12)
        self.allocate_from_provider(rp, 'VCPU', 1)

        inv = inv_obj.Inventory(
            resource_provider=rp,
            resource_class='MEMORY_MB',
            total=1024,
            reserved=0,
            min_unit=256,
            max_unit=1024,
            step_size=256,
            allocation_ratio=1.0,
        )

        self.assertRaises(exception.InventoryInUse,
                          rp.set_inventory,
                          [inv])
Example #14
    def start_fixture(self):
        super(NonSharedStorageFixture, self).start_fixture()

        aggA_uuid = uuidutils.generate_uuid()
        aggB_uuid = uuidutils.generate_uuid()
        aggC_uuid = uuidutils.generate_uuid()
        os.environ['AGGA_UUID'] = aggA_uuid
        os.environ['AGGB_UUID'] = aggB_uuid
        os.environ['AGGC_UUID'] = aggC_uuid

        cn1 = tb.create_provider(self.context, 'cn1')
        cn2 = tb.create_provider(self.context, 'cn2')

        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid

        # Populate compute node inventory for VCPU, RAM and DISK
        for cn in (cn1, cn2):
            tb.add_inventory(cn, 'VCPU', 24)
            tb.add_inventory(cn, 'MEMORY_MB', 128 * 1024)
            tb.add_inventory(cn, 'DISK_GB', 2000)
Example #15
    def test_delete_consumer_if_no_allocs(self):
        """alloc_obj.replace_all() should attempt to delete consumers that
        no longer have any allocations. Due to the REST API not having any way
        to query for consumers directly (only via the GET
        /allocations/{consumer_uuid} endpoint which returns an empty dict even
        when no consumer record exists for the {consumer_uuid}) we need to do
        this functional test using only the object layer.
        """
        # We will use two consumers in this test, only one of which will get
        # all of its allocations deleted in a transaction (and we expect that
        # consumer record to be deleted)
        c1 = consumer_obj.Consumer(self.ctx,
                                   uuid=uuids.consumer1,
                                   user=self.user_obj,
                                   project=self.project_obj)
        c1.create()
        c2 = consumer_obj.Consumer(self.ctx,
                                   uuid=uuids.consumer2,
                                   user=self.user_obj,
                                   project=self.project_obj)
        c2.create()

        # Create some inventory that we will allocate
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, orc.VCPU, 8)
        tb.add_inventory(cn1, orc.MEMORY_MB, 2048)
        tb.add_inventory(cn1, orc.DISK_GB, 2000)

        # Now allocate some of that inventory to two different consumers
        allocs = [
            alloc_obj.Allocation(consumer=c1,
                                 resource_provider=cn1,
                                 resource_class=orc.VCPU,
                                 used=1),
            alloc_obj.Allocation(consumer=c1,
                                 resource_provider=cn1,
                                 resource_class=orc.MEMORY_MB,
                                 used=512),
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.VCPU,
                                 used=1),
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.MEMORY_MB,
                                 used=512),
        ]
        alloc_obj.replace_all(self.ctx, allocs)

        # Validate that we have consumer records for both consumers
        for c_uuid in (uuids.consumer1, uuids.consumer2):
            c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
            self.assertIsNotNone(c_obj)

        # OK, now "remove" the allocation for consumer2 by setting the used
        # value for both allocated resources to 0 and re-running the
        # alloc_obj.replace_all(). This should end up deleting the
        # consumer record for consumer2
        allocs = [
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.VCPU,
                                 used=0),
            alloc_obj.Allocation(consumer=c2,
                                 resource_provider=cn1,
                                 resource_class=orc.MEMORY_MB,
                                 used=0),
        ]
        alloc_obj.replace_all(self.ctx, allocs)

        # consumer1 should still exist...
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
        self.assertIsNotNone(c_obj)

        # but not consumer2...
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer2)

        # DELETE /allocations/{consumer_uuid} is the other place where we
        # delete all allocations for a consumer. Let's delete all for consumer1
        # and check that the consumer record is deleted
        alloc_list = alloc_obj.get_all_by_consumer_id(self.ctx,
                                                      uuids.consumer1)
        alloc_obj.delete_all(self.ctx, alloc_list)

        # consumer1 should no longer exist in the DB since we just deleted all
        # of its allocations
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer1)
Example #16
    def make_entities(self):
        aggA_uuid = uuidutils.generate_uuid()
        os.environ['AGGA_UUID'] = aggA_uuid

        ss1 = tb.create_provider(self.context, 'ss1', aggA_uuid)
        tb.set_traits(ss1, ot.MISC_SHARES_VIA_AGGREGATE)
        tb.add_inventory(ss1, orc.DISK_GB, 2000)
        os.environ['SS1_UUID'] = ss1.uuid

        # CN1
        if not self.cn1:
            self.cn1 = tb.create_provider(self.context, 'cn1', aggA_uuid)
        self.cn1.set_aggregates([aggA_uuid])
        tb.set_traits(self.cn1, ot.COMPUTE_VOLUME_MULTI_ATTACH)
        os.environ['CN1_UUID'] = self.cn1.uuid

        numas = []
        for i in (0, 1):
            numa = tb.create_provider(
                self.context, 'numa%d' % i, parent=self.cn1.uuid)
            traits = [ot.HW_NUMA_ROOT]
            if i == 1:
                traits.append('CUSTOM_FOO')
            tb.set_traits(numa, *traits)
            tb.add_inventory(numa, orc.VCPU, 4)
            numas.append(numa)
            os.environ['NUMA%d_UUID' % i] = numa.uuid
        tb.add_inventory(
            numas[0], orc.MEMORY_MB, 2048, min_unit=512, step_size=256)
        tb.add_inventory(
            numas[1], orc.MEMORY_MB, 2048, min_unit=256, max_unit=1024)
        user, proj = tb.create_user_and_project(self.context, prefix='numafx')
        consumer = tb.ensure_consumer(self.context, user, proj)
        tb.set_allocation(self.context, numas[0], consumer, {orc.VCPU: 2})

        fpga = tb.create_provider(self.context, 'fpga0', parent=numas[0].uuid)
        # TODO(efried): Use standard FPGA resource class
        tb.add_inventory(fpga, 'CUSTOM_FPGA', 1)
        os.environ['FPGA0_UUID'] = fpga.uuid

        pgpu = tb.create_provider(self.context, 'pgpu0', parent=numas[0].uuid)
        tb.add_inventory(pgpu, orc.VGPU, 8)
        os.environ['PGPU0_UUID'] = pgpu.uuid

        for i in (0, 1):
            fpga = tb.create_provider(
                self.context, 'fpga1_%d' % i, parent=numas[1].uuid)
            # TODO(efried): Use standard FPGA resource class
            tb.add_inventory(fpga, 'CUSTOM_FPGA', 1)
            os.environ['FPGA1_%d_UUID' % i] = fpga.uuid

        agent = tb.create_provider(
            self.context, 'sriov_agent', parent=self.cn1.uuid)
        tb.set_traits(agent, 'CUSTOM_VNIC_TYPE_DIRECT')
        os.environ['SRIOV_AGENT_UUID'] = agent.uuid

        for i in (1, 2):
            dev = tb.create_provider(
                self.context, 'esn%d' % i, parent=agent.uuid)
            tb.set_traits(dev, 'CUSTOM_PHYSNET%d' % i)
            tb.add_inventory(dev, orc.NET_BW_EGR_KILOBIT_PER_SEC, 10000 * i)
            os.environ['ESN%d_UUID' % i] = dev.uuid

        agent = tb.create_provider(
            self.context, 'ovs_agent', parent=self.cn1.uuid)
        tb.set_traits(agent, 'CUSTOM_VNIC_TYPE_NORMAL')
        os.environ['OVS_AGENT_UUID'] = agent.uuid

        dev = tb.create_provider(self.context, 'br_int', parent=agent.uuid)
        tb.set_traits(dev, 'CUSTOM_PHYSNET0')
        tb.add_inventory(dev, orc.NET_BW_EGR_KILOBIT_PER_SEC, 1000)
        os.environ['BR_INT_UUID'] = dev.uuid

        # CN2
        if not self.cn2:
            self.cn2 = tb.create_provider(self.context, 'cn2')

        self.cn2.set_aggregates([aggA_uuid])
        tb.add_inventory(self.cn2, orc.VCPU, 8)
        # Get a new consumer
        consumer = tb.ensure_consumer(self.context, user, proj)
        tb.set_allocation(self.context, self.cn2, consumer, {orc.VCPU: 3})
        tb.add_inventory(
            self.cn2, orc.MEMORY_MB, 2048, min_unit=1024, step_size=128)
        tb.add_inventory(self.cn2, orc.DISK_GB, 1000)
        tb.set_traits(self.cn2, 'CUSTOM_FOO')
        os.environ['CN2_UUID'] = self.cn2.uuid

        nics = []
        for i in (1, 2, 3):
            nic = tb.create_provider(
                self.context, 'nic%d' % i, parent=self.cn2.uuid)
            # TODO(efried): Use standard HW_NIC_ROOT trait
            tb.set_traits(nic, 'CUSTOM_HW_NIC_ROOT')
            nics.append(nic)
            os.environ['NIC%s_UUID' % i] = nic.uuid
        # PFs for NIC1
        for i in (1, 2):
            suf = '1_%d' % i
            pf = tb.create_provider(
                self.context, 'pf%s' % suf, parent=nics[0].uuid)
            tb.set_traits(pf, 'CUSTOM_PHYSNET%d' % i)
            # TODO(efried): Use standard generic VF resource class?
            tb.add_inventory(pf, 'CUSTOM_VF', 4)
            os.environ['PF%s_UUID' % suf] = pf.uuid
        # PFs for NIC2
        for i in (0, 1, 2, 3):
            suf = '2_%d' % (i + 1)
            pf = tb.create_provider(
                self.context, 'pf%s' % suf, parent=nics[1].uuid)
            tb.set_traits(pf, 'CUSTOM_PHYSNET%d' % ((i % 2) + 1))
            # TODO(efried): Use standard generic VF resource class?
            tb.add_inventory(pf, 'CUSTOM_VF', 2)
            os.environ['PF%s_UUID' % suf] = pf.uuid
        # PF for NIC3
        suf = '3_1'
        pf = tb.create_provider(
            self.context, 'pf%s' % suf, parent=nics[2].uuid)
        tb.set_traits(pf, 'CUSTOM_PHYSNET1')
        # TODO(efried): Use standard generic VF resource class?
        tb.add_inventory(pf, 'CUSTOM_VF', 8)
        os.environ['PF%s_UUID' % suf] = pf.uuid
Example #17
    def start_fixture(self):
        super(GranularFixture, self).start_fixture()

        rc_obj.ResourceClass(
            context=self.context, name='CUSTOM_NET_MBPS').create()

        os.environ['AGGA'] = uuids.aggA
        os.environ['AGGB'] = uuids.aggB
        os.environ['AGGC'] = uuids.aggC

        cn_left = tb.create_provider(self.context, 'cn_left', uuids.aggA)
        os.environ['CN_LEFT'] = cn_left.uuid
        tb.add_inventory(cn_left, 'VCPU', 8)
        tb.add_inventory(cn_left, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_left, 'DISK_GB', 500)
        tb.add_inventory(cn_left, 'VGPU', 8)
        tb.add_inventory(cn_left, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_left, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_left, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_GPU_API_DXVA', 'HW_NIC_DCB_PFC', 'CUSTOM_FOO')

        cn_middle = tb.create_provider(
            self.context, 'cn_middle', uuids.aggA, uuids.aggB)
        os.environ['CN_MIDDLE'] = cn_middle.uuid
        tb.add_inventory(cn_middle, 'VCPU', 8)
        tb.add_inventory(cn_middle, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_middle, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_middle, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_middle, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_CPU_X86_SSE', 'HW_NIC_ACCEL_TLS')

        cn_right = tb.create_provider(
            self.context, 'cn_right', uuids.aggB, uuids.aggC)
        os.environ['CN_RIGHT'] = cn_right.uuid
        tb.add_inventory(cn_right, 'VCPU', 8)
        tb.add_inventory(cn_right, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_right, 'DISK_GB', 500)
        tb.add_inventory(cn_right, 'VGPU', 8, max_unit=2)
        tb.set_traits(cn_right, 'HW_CPU_X86_MMX', 'HW_GPU_API_DXVA',
                      'CUSTOM_DISK_SSD')

        shr_disk_1 = tb.create_provider(self.context, 'shr_disk_1', uuids.aggA)
        os.environ['SHR_DISK_1'] = shr_disk_1.uuid
        tb.add_inventory(shr_disk_1, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_1, 'MISC_SHARES_VIA_AGGREGATE',
                      'CUSTOM_DISK_SSD')

        shr_disk_2 = tb.create_provider(
            self.context, 'shr_disk_2', uuids.aggA, uuids.aggB)
        os.environ['SHR_DISK_2'] = shr_disk_2.uuid
        tb.add_inventory(shr_disk_2, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_2, 'MISC_SHARES_VIA_AGGREGATE')

        shr_net = tb.create_provider(self.context, 'shr_net', uuids.aggC)
        os.environ['SHR_NET'] = shr_net.uuid
        tb.add_inventory(shr_net, 'SRIOV_NET_VF', 16)
        tb.add_inventory(shr_net, 'CUSTOM_NET_MBPS', 40000)
        tb.set_traits(shr_net, 'MISC_SHARES_VIA_AGGREGATE')
    def test_multi_provider_allocation(self):
        """Tests that an allocation that includes more than one resource
        provider can be created, listed and deleted properly.

        Bug #1707669 highlighted a situation that arose when attempting to
        remove part of an allocation for a source host during a resize
        operation where the existing allocation was not being properly
        deleted.
        """
        cn_source = self._create_provider('cn_source')
        cn_dest = self._create_provider('cn_dest')

        # Add same inventory to both source and destination host
        for cn in (cn_source, cn_dest):
            tb.add_inventory(cn, orc.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(cn, orc.MEMORY_MB, 1024,
                             min_unit=64,
                             max_unit=1024,
                             step_size=64,
                             allocation_ratio=1.5)

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        # Now create an allocation that represents a move operation where the
        # scheduler has selected cn_dest as the target host and created a
        # "doubled-up" allocation for the duration of the move operation
        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_source,
                resource_class=orc.VCPU,
                used=1),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_source,
                resource_class=orc.MEMORY_MB,
                used=256),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.VCPU,
                used=1),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.MEMORY_MB,
                used=256),
        ]
        alloc_obj.replace_all(self.ctx, alloc_list)

        src_allocs = alloc_obj.get_all_by_resource_provider(
            self.ctx, cn_source)

        self.assertEqual(2, len(src_allocs))

        dest_allocs = alloc_obj.get_all_by_resource_provider(self.ctx, cn_dest)

        self.assertEqual(2, len(dest_allocs))

        consumer_allocs = alloc_obj.get_all_by_consumer_id(
            self.ctx, uuidsentinel.instance)

        self.assertEqual(4, len(consumer_allocs))

        # Validate that when we create an allocation for a consumer, we
        # delete any existing allocations and replace them with the new ones.
        # Here, we're emulating the step that occurs on confirm_resize(),
        # where the source host pulls the existing allocation for the
        # instance, removes any resources that refer to itself, and saves the
        # allocation back to placement.
        new_alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.VCPU,
                used=1),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=cn_dest,
                resource_class=orc.MEMORY_MB,
                used=256),
        ]
        alloc_obj.replace_all(self.ctx, new_alloc_list)

        src_allocs = alloc_obj.get_all_by_resource_provider(
            self.ctx, cn_source)

        self.assertEqual(0, len(src_allocs))

        dest_allocs = alloc_obj.get_all_by_resource_provider(
            self.ctx, cn_dest)

        self.assertEqual(2, len(dest_allocs))

        consumer_allocs = alloc_obj.get_all_by_consumer_id(
            self.ctx, uuidsentinel.instance)

        self.assertEqual(2, len(consumer_allocs))
Example #19
    def start_fixture(self):
        super(NeutronQoSMultiSegmentFixture, self).start_fixture()

        # compute 0 with no connectivity to the multi-segment network
        compute0 = tb.create_provider(self.context, 'compute0')
        os.environ['compute0'] = compute0.uuid
        tb.add_inventory(compute0, 'VCPU', 8)
        tb.add_inventory(compute0, 'MEMORY_MB', 4096)
        tb.add_inventory(compute0, 'DISK_GB', 500)

        # OVS agent subtree
        compute0_ovs_agent = tb.create_provider(self.context,
                                                'compute0:Open vSwitch agent',
                                                parent=compute0.uuid)
        os.environ['compute0:ovs_agent'] = compute0_ovs_agent.uuid
        tb.add_inventory(compute0_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute0_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute0_br_ex = tb.create_provider(
            self.context,
            'compute0:Open vSwitch agent:br-ex',
            parent=compute0_ovs_agent.uuid)
        os.environ['compute0:br_ex'] = compute0_br_ex.uuid
        tb.add_inventory(compute0_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 5000)
        tb.add_inventory(compute0_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 5000)
        tb.set_traits(
            compute0_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_OTHER',
        )

        # SRIOV agent subtree
        compute0_sriov_agent = tb.create_provider(self.context,
                                                  'compute0:NIC Switch agent',
                                                  parent=compute0.uuid)
        os.environ['compute0:sriov_agent'] = compute0_sriov_agent.uuid
        tb.set_traits(
            compute0_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute0_pf0 = tb.create_provider(
            self.context,
            'compute0:NIC Switch agent:enp129s0f0',
            parent=compute0_sriov_agent.uuid)
        os.environ['compute0:pf0'] = compute0_pf0.uuid
        tb.add_inventory(compute0_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 10000)
        tb.add_inventory(compute0_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 10000)
        tb.set_traits(
            compute0_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_OTHER',
        )

        # compute 1 with network connectivity to segment 1
        compute1 = tb.create_provider(self.context, 'compute1')
        os.environ['compute1'] = compute1.uuid
        tb.add_inventory(compute1, 'VCPU', 8)
        tb.add_inventory(compute1, 'MEMORY_MB', 4096)
        tb.add_inventory(compute1, 'DISK_GB', 500)
        # OVS agent subtree
        compute1_ovs_agent = tb.create_provider(self.context,
                                                'compute1:Open vSwitch agent',
                                                parent=compute1.uuid)
        os.environ['compute1:ovs_agent'] = compute1_ovs_agent.uuid
        tb.add_inventory(compute1_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute1_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute1_br_ex = tb.create_provider(
            self.context,
            'compute1:Open vSwitch agent:br-ex',
            parent=compute1_ovs_agent.uuid)
        os.environ['compute1:br_ex'] = compute1_br_ex.uuid
        tb.add_inventory(compute1_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 5000)
        tb.add_inventory(compute1_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 5000)
        tb.set_traits(
            compute1_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        # SRIOV agent subtree
        compute1_sriov_agent = tb.create_provider(self.context,
                                                  'compute1:NIC Switch agent',
                                                  parent=compute1.uuid)
        os.environ['compute1:sriov_agent'] = compute1_sriov_agent.uuid
        tb.set_traits(
            compute1_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute1_pf0 = tb.create_provider(
            self.context,
            'compute1:NIC Switch agent:enp129s0f0',
            parent=compute1_sriov_agent.uuid)
        os.environ['compute1:pf0'] = compute1_pf0.uuid
        tb.add_inventory(compute1_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 10000)
        tb.add_inventory(compute1_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 10000)
        tb.set_traits(
            compute1_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        # compute 2 with network connectivity to segment 2
        compute2 = tb.create_provider(self.context, 'compute2')
        os.environ['compute2'] = compute2.uuid
        tb.add_inventory(compute2, 'VCPU', 8)
        tb.add_inventory(compute2, 'MEMORY_MB', 4096)
        tb.add_inventory(compute2, 'DISK_GB', 500)

        # OVS agent subtree
        compute2_ovs_agent = tb.create_provider(self.context,
                                                'compute2:Open vSwitch agent',
                                                parent=compute2.uuid)
        os.environ['compute2:ovs_agent'] = compute2_ovs_agent.uuid
        tb.add_inventory(compute2_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute2_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute2_br_ex = tb.create_provider(
            self.context,
            'compute2:Open vSwitch agent:br-ex',
            parent=compute2_ovs_agent.uuid)
        os.environ['compute2:br_ex'] = compute2_br_ex.uuid
        tb.add_inventory(compute2_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 5000)
        tb.add_inventory(compute2_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 5000)
        tb.set_traits(
            compute2_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S2',
        )

        # SRIOV agent subtree
        compute2_sriov_agent = tb.create_provider(self.context,
                                                  'compute2:NIC Switch agent',
                                                  parent=compute2.uuid)
        os.environ['compute2:sriov_agent'] = compute2_sriov_agent.uuid
        tb.set_traits(
            compute2_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute2_pf0 = tb.create_provider(
            self.context,
            'compute2:NIC Switch agent:enp129s0f0',
            parent=compute2_sriov_agent.uuid)
        os.environ['compute2:pf0'] = compute2_pf0.uuid
        tb.add_inventory(compute2_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 10000)
        tb.add_inventory(compute2_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 10000)
        tb.set_traits(
            compute2_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S2',
        )

        # compute 3 with network connectivity to both segment 1 and 2
        compute3 = tb.create_provider(self.context, 'compute3')
        os.environ['compute3'] = compute3.uuid
        tb.add_inventory(compute3, 'VCPU', 8)
        tb.add_inventory(compute3, 'MEMORY_MB', 4096)
        tb.add_inventory(compute3, 'DISK_GB', 500)

        # OVS agent subtree
        compute3_ovs_agent = tb.create_provider(self.context,
                                                'compute3:Open vSwitch agent',
                                                parent=compute3.uuid)
        os.environ['compute3:ovs_agent'] = compute3_ovs_agent.uuid
        tb.add_inventory(compute3_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute3_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute3_br_ex = tb.create_provider(
            self.context,
            'compute3:Open vSwitch agent:br-ex',
            parent=compute3_ovs_agent.uuid)
        os.environ['compute3:br_ex'] = compute3_br_ex.uuid
        tb.add_inventory(compute3_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        compute3_br_ex2 = tb.create_provider(
            self.context,
            'compute3:Open vSwitch agent:br-ex2',
            parent=compute3_ovs_agent.uuid)
        os.environ['compute3:br_ex2'] = compute3_br_ex2.uuid
        tb.add_inventory(compute3_br_ex2, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_br_ex2, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_br_ex2,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S2',
        )

        # SRIOV agent subtree
        compute3_sriov_agent = tb.create_provider(self.context,
                                                  'compute3:NIC Switch agent',
                                                  parent=compute3.uuid)
        os.environ['compute3:sriov_agent'] = compute3_sriov_agent.uuid
        tb.set_traits(
            compute3_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute3_pf0 = tb.create_provider(
            self.context,
            'compute3:NIC Switch agent:enp129s0f0',
            parent=compute3_sriov_agent.uuid)
        os.environ['compute3:pf0'] = compute3_pf0.uuid
        tb.add_inventory(compute3_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        compute3_pf1 = tb.create_provider(
            self.context,
            'compute3:NIC Switch agent:enp129s0f1',
            parent=compute3_sriov_agent.uuid)
        os.environ['compute3:pf1'] = compute3_pf1.uuid
        tb.add_inventory(compute3_pf1, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_pf1, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_pf1,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S2',
        )
    def test_create_exceeding_capacity_allocation(self):
        """Tests a list of allocations that contains an invalid allocation
        exceeding the resource provider's capacity.

        Expect InvalidAllocationCapacityExceeded to be raised and none of the
        allocations in the list to be applied.
        """
        empty_rp = self._create_provider('empty_rp')
        full_rp = self._create_provider('full_rp')

        for rp in (empty_rp, full_rp):
            tb.add_inventory(rp, orc.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(rp, orc.MEMORY_MB, 1024,
                             min_unit=64,
                             max_unit=1024,
                             step_size=64)

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        # First create an allocation to consume full_rp's resources.
        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=full_rp,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=full_rp,
                resource_class=orc.MEMORY_MB,
                used=1024)
        ]
        alloc_obj.replace_all(self.ctx, alloc_list)

        # Create a consumer representing the second instance
        inst2_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance2, user=self.user_obj,
            project=self.project_obj)
        inst2_consumer.create()

        # Create an allocation list consisting of valid requests and an invalid
        # request exceeding the memory full_rp can provide.
        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=empty_rp,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=empty_rp,
                resource_class=orc.MEMORY_MB,
                used=512),
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=full_rp,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst2_consumer,
                resource_provider=full_rp,
                resource_class=orc.MEMORY_MB,
                used=512),
        ]

        self.assertRaises(exception.InvalidAllocationCapacityExceeded,
                          alloc_obj.replace_all, self.ctx, alloc_list)

        # Make sure that allocations of both empty_rp and full_rp remain
        # unchanged.
        allocations = alloc_obj.get_all_by_resource_provider(self.ctx, full_rp)
        self.assertEqual(2, len(allocations))

        allocations = alloc_obj.get_all_by_resource_provider(
            self.ctx, empty_rp)
        self.assertEqual(0, len(allocations))
    def test_set_allocations_retry(self, mock_log):
        """Test server side allocation write retry handling."""

        # Create a single resource provider and give it some inventory.
        rp1 = self._create_provider('rp1')
        tb.add_inventory(rp1, orc.VCPU, 24,
                         allocation_ratio=16.0)
        tb.add_inventory(rp1, orc.MEMORY_MB, 1024,
                         min_unit=64,
                         max_unit=1024,
                         step_size=64)
        original_generation = rp1.generation
        # Verify the generation is what we expect (we'll be checking again
        # later).
        self.assertEqual(2, original_generation)

        # Create a consumer and have it make an allocation.
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        alloc_list = [
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=rp1,
                resource_class=orc.VCPU,
                used=12),
            alloc_obj.Allocation(
                consumer=inst_consumer,
                resource_provider=rp1,
                resource_class=orc.MEMORY_MB,
                used=1024)
        ]

        # Make sure the right exception happens when the retry loop expires.
        self.conf_fixture.config(allocation_conflict_retry_count=0,
                                 group='placement')
        self.assertRaises(
            exception.ResourceProviderConcurrentUpdateDetected,
            alloc_obj.replace_all, self.ctx, alloc_list)
        mock_log.warning.assert_called_with(
            'Exceeded retry limit of %d on allocations write', 0)

        # Make sure the right thing happens after a small number of failures.
        # There's a bit of mock magic going on here to ensure that we can
        # both do some side effects on _set_allocations as well as have the
        # real behavior. Two generation conflicts and then a success.
        mock_log.reset_mock()
        self.conf_fixture.config(allocation_conflict_retry_count=3,
                                 group='placement')
        unmocked_set = alloc_obj._set_allocations
        with mock.patch('placement.objects.allocation.'
                        '_set_allocations') as mock_set:
            exceptions = iter([
                exception.ResourceProviderConcurrentUpdateDetected(),
                exception.ResourceProviderConcurrentUpdateDetected(),
            ])

            def side_effect(*args, **kwargs):
                try:
                    raise next(exceptions)
                except StopIteration:
                    return unmocked_set(*args, **kwargs)

            mock_set.side_effect = side_effect
            alloc_obj.replace_all(self.ctx, alloc_list)
            self.assertEqual(2, mock_log.debug.call_count)
            mock_log.debug.called_with(
                'Retrying allocations write on resource provider '
                'generation conflict')
            self.assertEqual(3, mock_set.call_count)

        # Confirm we're using a different rp object after the change
        # and that it has a higher generation.
        new_rp = alloc_list[0].resource_provider
        self.assertEqual(original_generation, rp1.generation)
        self.assertEqual(original_generation + 1, new_rp.generation)
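    # The "two generation conflicts, then success" trick above is a generic
    # unittest.mock pattern. A self-contained sketch of the same idea, using
    # illustrative names that are not part of the placement code base:
    #
    #     from unittest import mock
    #
    #     failures = iter([ValueError(), ValueError()])
    #
    #     def flaky(*args, **kwargs):
    #         try:
    #             raise next(failures)
    #         except StopIteration:
    #             return 'ok'
    #
    #     stub = mock.Mock(side_effect=flaky)
    #     # The first two calls to stub() raise ValueError; the third
    #     # returns 'ok'.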
Example #22
    def test_reshape(self):
        """We set up the following scenario:

        BEFORE: single compute node setup

          A single compute node with:
            - VCPU, MEMORY_MB, DISK_GB inventory
            - Two instances consuming CPU, RAM and DISK from that compute node

        AFTER: hierarchical + shared storage setup

          A compute node parent provider with:
            - MEMORY_MB
          Two NUMA node child providers containing:
            - VCPU
          Shared storage provider with:
            - DISK_GB
          Both instances have their resources split among the providers and
          shared storage accordingly
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()

        i2_uuid = uuids.instance2
        i2_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i2_uuid, user=self.user_obj,
            project=self.project_obj)
        i2_consumer.create()

        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate both instances against the single compute node
        for consumer in (i1_consumer, i2_consumer):
            allocs = [
                alloc_obj.Allocation(
                    resource_provider=cn1,
                    resource_class='VCPU', consumer=consumer, used=2),
                alloc_obj.Allocation(
                    resource_provider=cn1,
                    resource_class='MEMORY_MB', consumer=consumer, used=1024),
                alloc_obj.Allocation(
                    resource_provider=cn1,
                    resource_class='DISK_GB', consumer=consumer, used=100),
            ]
            alloc_obj.replace_all(self.ctx, allocs)

        # Verify we have the allocations we expect for the BEFORE scenario
        before_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
        self.assertEqual(3, len(before_allocs_i1))
        self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
        before_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
        self.assertEqual(3, len(before_allocs_i2))
        self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1: [
                inv_obj.Inventory(
                    resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # each NUMA node gets half of the CPUs
            cn1_numa0: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            cn1_numa1: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # The sharing provider gets a bunch of disk
            ss: [
                inv_obj.Inventory(
                    resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)
        after_allocs = [
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            alloc_obj.Allocation(
                resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
            # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            alloc_obj.Allocation(
                resource_provider=cn1_numa1, resource_class='VCPU',
                consumer=i2_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i2_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=ss, resource_class='DISK_GB',
                consumer=i2_consumer, used=100),
        ]
        reshaper.reshape(self.ctx, after_inventories, after_allocs)

        # Verify that the inventories have been moved to the appropriate
        # providers in the AFTER scenario

        # The root compute node should only have MEMORY_MB, nothing else
        cn1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1)
        self.assertEqual(1, len(cn1_inv))
        self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
        self.assertEqual(32768, cn1_inv[0].total)
        # Each NUMA node should only have half the original VCPU, nothing else
        numa0_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa0)
        self.assertEqual(1, len(numa0_inv))
        self.assertEqual('VCPU', numa0_inv[0].resource_class)
        self.assertEqual(8, numa0_inv[0].total)
        numa1_inv = inv_obj.get_all_by_resource_provider(self.ctx, cn1_numa1)
        self.assertEqual(1, len(numa1_inv))
        self.assertEqual('VCPU', numa1_inv[0].resource_class)
        self.assertEqual(8, numa1_inv[0].total)
        # The sharing storage provider should only have DISK_GB, nothing else
        ss_inv = inv_obj.get_all_by_resource_provider(self.ctx, ss)
        self.assertEqual(1, len(ss_inv))
        self.assertEqual('DISK_GB', ss_inv[0].resource_class)
        self.assertEqual(100000, ss_inv[0].total)

        # Verify we have the allocations we expect for the AFTER scenario
        after_allocs_i1 = alloc_obj.get_all_by_consumer_id(self.ctx, i1_uuid)
        self.assertEqual(3, len(after_allocs_i1))
        # Our VCPU allocation should be in the NUMA0 node
        vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

        after_allocs_i2 = alloc_obj.get_all_by_consumer_id(self.ctx, i2_uuid)
        self.assertEqual(3, len(after_allocs_i2))
        # Our VCPU allocation should be in the NUMA1 node
        vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
Exemple #23
    def test_reshape_concurrent_inventory_update(self):
        """Valid failure scenario for reshape(). We test a situation where the
        virt driver has constructed it's "after inventories and allocations"
        and sent those to the POST /reshape endpoint. The reshape POST handler
        does a quick check of the resource provider generations sent in the
        payload and they all check out.

        However, right before the call to resource_provider.reshape(), another
        thread legitimately changes the inventory of one of the providers
        involved in the reshape transaction. We should get a
        ConcurrentUpdateDetected in this case.
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()

        # then all our original providers
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate an instance on our compute node
        allocs = [
            alloc_obj.Allocation(
                resource_provider=cn1,
                resource_class='VCPU', consumer=i1_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1,
                resource_class='MEMORY_MB', consumer=i1_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=cn1,
                resource_class='DISK_GB', consumer=i1_consumer, used=100),
        ]
        alloc_obj.replace_all(self.ctx, allocs)

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1: [
                inv_obj.Inventory(
                    resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # each NUMA node gets half of the CPUs
            cn1_numa0: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            cn1_numa1: [
                inv_obj.Inventory(
                    resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
            # The sharing provider gets a bunch of disk
            ss: [
                inv_obj.Inventory(
                    resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ],
        }
        # We do a fetch from the DB for the instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumer representing the instance above will have had its
        # generation incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        after_allocs = [
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            alloc_obj.Allocation(
                resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            alloc_obj.Allocation(
                resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            alloc_obj.Allocation(
                resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
        ]

        # OK, now before we call reshape(), here we emulate another thread
        # changing the inventory for the sharing storage provider in between
        # the time in the REST handler when the sharing storage provider's
        # generation was validated and the actual call to reshape()
        ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
        # Reduce the amount of storage to 2000, from 100000.
        new_ss_inv = [
            inv_obj.Inventory(
                resource_provider=ss_threadB, resource_class='DISK_GB',
                total=2000, reserved=0, max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0)]
        ss_threadB.set_inventory(new_ss_inv)
        # Double-check that our storage provider's generation is now greater
        # than the generation on the original storage provider record being
        # sent to reshape()
        self.assertGreater(ss_threadB.generation, ss.generation)

        # And reshape() should now legitimately fail because another thread
        # updated the generation of one of the involved providers
        self.assertRaises(
            exception.ConcurrentUpdateDetected,
            reshaper.reshape, self.ctx, after_inventories, after_allocs)
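
    # Not part of the original tests: a minimal sketch of how a caller (e.g.
    # the scheduler report client) might recover from the
    # ConcurrentUpdateDetected raised above. The build_after_state callable
    # is a hypothetical placeholder that re-reads providers and consumers so
    # the payload carries their current generations.
    def _reshape_with_retry(self, build_after_state, attempts=3):
        for attempt in range(attempts):
            # Rebuild the "after" view from fresh DB reads on every attempt.
            after_inventories, after_allocs = build_after_state()
            try:
                reshaper.reshape(self.ctx, after_inventories, after_allocs)
                return
            except exception.ConcurrentUpdateDetected:
                # Another thread bumped a provider or consumer generation
                # between our read and the reshape; retry with fresh state.
                if attempt == attempts - 1:
                    raise
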
    def test_allocation_list_create(self):
        max_unit = 10
        consumer_uuid = uuidsentinel.consumer

        # Create a consumer representing the instance
        inst_consumer = consumer_obj.Consumer(
            self.ctx, uuid=consumer_uuid, user=self.user_obj,
            project=self.project_obj)
        inst_consumer.create()

        # Create two resource providers
        rp1_name = uuidsentinel.rp1_name
        rp1_uuid = uuidsentinel.rp1_uuid
        rp1_class = orc.DISK_GB
        rp1_used = 6

        rp2_name = uuidsentinel.rp2_name
        rp2_uuid = uuidsentinel.rp2_uuid
        rp2_class = orc.IPV4_ADDRESS
        rp2_used = 2

        rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
        rp2 = self._create_provider(rp2_name, uuid=rp2_uuid)

        # Two allocations, one for each resource provider.
        allocation_1 = alloc_obj.Allocation(
            resource_provider=rp1, consumer=inst_consumer,
            resource_class=rp1_class, used=rp1_used)
        allocation_2 = alloc_obj.Allocation(
            resource_provider=rp2, consumer=inst_consumer,
            resource_class=rp2_class, used=rp2_used)
        allocation_list = [allocation_1, allocation_2]

        # There's no inventory, so we expect a failure.
        error = self.assertRaises(exception.InvalidInventory,
                                  alloc_obj.replace_all, self.ctx,
                                  allocation_list)
        # Confirm that the resource class string (not its index) appears in
        # the exception and that the resource providers are listed by uuid.
        self.assertIn(rp1_class, str(error))
        self.assertIn(rp2_class, str(error))
        self.assertIn(rp1.uuid, str(error))
        self.assertIn(rp2.uuid, str(error))

        # Add inventory for one of the two resource providers. This should also
        # fail, since rp2 has no inventory.
        tb.add_inventory(rp1, rp1_class, 1024, max_unit=1)
        self.assertRaises(exception.InvalidInventory,
                          alloc_obj.replace_all, self.ctx, allocation_list)

        # Add inventory for the second resource provider
        tb.add_inventory(rp2, rp2_class, 255, reserved=2, max_unit=1)

        # Now the allocations will still fail because max_unit is 1
        self.assertRaises(exception.InvalidAllocationConstraintsViolated,
                          alloc_obj.replace_all, self.ctx, allocation_list)
        inv1 = inv_obj.Inventory(resource_provider=rp1,
                                 resource_class=rp1_class,
                                 total=1024, max_unit=max_unit)
        rp1.set_inventory([inv1])
        inv2 = inv_obj.Inventory(resource_provider=rp2,
                                 resource_class=rp2_class,
                                 total=255, reserved=2, max_unit=max_unit)
        rp2.set_inventory([inv2])

        # Now we can finally allocate.
        alloc_obj.replace_all(self.ctx, allocation_list)

        # Check that those allocations changed usage on each
        # resource provider.
        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp2_uuid)
        self.assertEqual(rp1_used, rp1_usage[0].usage)
        self.assertEqual(rp2_used, rp2_usage[0].usage)

        # redo one allocation
        # TODO(cdent): This does not currently behave as expected
        # because a new allocation is created, adding to the total
        # used, not replacing.
        rp1_used += 1
        self.allocate_from_provider(
            rp1, rp1_class, rp1_used, consumer=inst_consumer)

        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        self.assertEqual(rp1_used, rp1_usage[0].usage)

        # delete the allocations for the consumer
        # NOTE(cdent): The database uses 'consumer_id' for the
        # column, presumably because some ids might not be uuids, at
        # some point in the future.
        consumer_allocations = alloc_obj.get_all_by_consumer_id(
            self.ctx, consumer_uuid)
        alloc_obj.delete_all(self.ctx, consumer_allocations)

        rp1_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp1_uuid)
        rp2_usage = usage_obj.get_all_by_resource_provider_uuid(
            self.ctx, rp2_uuid)
        self.assertEqual(0, rp1_usage[0].usage)
        self.assertEqual(0, rp2_usage[0].usage)
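
    # Not part of the original tests: a small illustrative helper showing the
    # relationship the usage assertions above rely on -- the usage reported
    # for a resource class on a provider is just the sum of the 'used' values
    # of the allocations currently written against that provider.
    def _summed_usage(self, rp, resource_class):
        allocs = alloc_obj.get_all_by_resource_provider(self.ctx, rp)
        return sum(alloc.used for alloc in allocs
                   if alloc.resource_class == resource_class)
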
    def test_shared_provider_capacity(self):
        """Sets up a resource provider that shares DISK_GB inventory via an
        aggregate, a couple resource providers representing "local disk"
        compute nodes and ensures the _get_providers_sharing_capacity()
        function finds that provider and not providers of "local disk".
        """
        # Create the two "local disk" compute node providers
        cn1 = self._create_provider('cn1')
        cn2 = self._create_provider('cn2')

        # Populate the two compute node providers with inventory.  One has
        # DISK_GB.  Both should be excluded from the result (one doesn't have
        # the requested resource; but neither is a sharing provider).
        for cn in (cn1, cn2):
            tb.add_inventory(cn, orc.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(cn, orc.MEMORY_MB, 32768,
                             min_unit=64,
                             max_unit=32768,
                             step_size=64,
                             allocation_ratio=1.5)
            if cn is cn1:
                tb.add_inventory(cn, orc.DISK_GB, 2000,
                                 min_unit=100,
                                 max_unit=2000,
                                 step_size=10)

        # Create the shared storage pool
        ss1 = self._create_provider('shared storage 1')
        ss2 = self._create_provider('shared storage 2')

        # Give the shared storage pool some inventory of DISK_GB
        for ss, disk_amount in ((ss1, 2000), (ss2, 1000)):
            tb.add_inventory(ss, orc.DISK_GB, disk_amount,
                             min_unit=100,
                             max_unit=2000,
                             step_size=10)
            # Mark the shared storage pool as having inventory shared among
            # any provider associated via aggregate
            tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")

        # OK, now that everything has been set up, let's verify that we get
        # the IDs of both shared storage pools
        got_ids = res_ctx.get_sharing_providers(self.ctx)
        self.assertEqual(set([ss1.id, ss2.id]), got_ids)

        request = placement_lib.RequestGroup(
            use_same_provider=False,
            resources={orc.VCPU: 2,
                       orc.MEMORY_MB: 256,
                       orc.DISK_GB: 1500})
        has_trees = res_ctx._has_provider_trees(self.ctx)
        sharing = res_ctx.get_sharing_providers(self.ctx)
        rg_ctx = res_ctx.RequestGroupSearchContext(
            self.ctx, request, has_trees, sharing)

        VCPU_ID = orc.STANDARDS.index(orc.VCPU)
        DISK_GB_ID = orc.STANDARDS.index(orc.DISK_GB)

        rps_sharing_vcpu = rg_ctx.get_rps_with_shared_capacity(VCPU_ID)
        self.assertEqual(set(), rps_sharing_vcpu)

        rps_sharing_disk = rg_ctx.get_rps_with_shared_capacity(DISK_GB_ID)
        self.assertEqual(set([ss1.id]), rps_sharing_disk)
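
    # Not part of the original tests: a minimal sketch of the full recipe for
    # a sharing provider. Inventory plus the MISC_SHARES_VIA_AGGREGATE trait
    # make a provider "sharing", but its resources only become reachable from
    # compute nodes associated with the same aggregate. The provider name and
    # the aggregate sentinel below are made up for illustration.
    def _make_sharing_disk_provider(self, compute_nodes):
        ss = self._create_provider('shared storage sketch')
        tb.add_inventory(ss, orc.DISK_GB, 2000,
                         min_unit=100, max_unit=2000, step_size=10)
        tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
        # Associate the sharing provider and the compute nodes via a common
        # aggregate so placement can use its DISK_GB for those nodes.
        agg = uuidsentinel.sketch_agg
        ss.set_aggregates([agg])
        for cn in compute_nodes:
            cn.set_aggregates([agg])
        return ss
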
    def test_get_all_by_filters_with_resources(self):
        for rp_i in ['1', '2']:
            rp = self._create_provider('rp_' + rp_i)
            tb.add_inventory(rp, orc.VCPU, 2)
            tb.add_inventory(rp, orc.DISK_GB, 1024,
                             reserved=2)
            # Write a specific inventory for testing min/max units and steps
            tb.add_inventory(rp, orc.MEMORY_MB, 1024,
                             reserved=2, min_unit=2, max_unit=4, step_size=2)

            # Create the VCPU allocation only for the first RP
            if rp_i != '1':
                continue
            self.allocate_from_provider(rp, orc.VCPU, used=1)

        # Both RPs should accept that request, since the only existing
        # allocation against the first RP still leaves one VCPU free
        filters = {'resources': {orc.VCPU: 1}}
        expected_rps = ['rp_1', 'rp_2']
        self._run_get_all_by_filters(expected_rps, filters=filters)
        # Now, when asking for 2 VCPUs, only the second RP should accept,
        # given the existing allocation against the first RP
        filters = {'resources': {orc.VCPU: 2}}
        expected_rps = ['rp_2']
        self._run_get_all_by_filters(expected_rps, filters=filters)
        # Adding a second resource to the request is still okay for the 2nd
        # RP, which has enough disk, while the first RP remains unacceptable
        # because of the VCPU request
        filters = {'resources': {orc.VCPU: 2, orc.DISK_GB: 1022}}
        expected_rps = ['rp_2']
        self._run_get_all_by_filters(expected_rps, filters=filters)
        # Now ask for disk and VCPU amounts that no RP can satisfy: the 1st
        # RP is short on VCPU and the 2nd RP cannot provide the full 1024
        # DISK_GB because 2 GB are reserved
        filters = {'resources': {orc.VCPU: 2, orc.DISK_GB: 1024}}
        expected_rps = []
        self._run_get_all_by_filters(expected_rps, filters=filters)

        # We also want to verify that filtering on a specific RP by name
        # still checks the resource usage.
        filters = {'name': u'rp_1', 'resources': {orc.VCPU: 1}}
        expected_rps = ['rp_1']
        self._run_get_all_by_filters(expected_rps, filters=filters)

        # Let's verify that the min and max units are checked too; the
        # per-inventory constraint these cases exercise is sketched as a
        # helper after this test.
        # Case 1: amount is between min and max and a multiple of step_size
        filters = {'resources': {orc.MEMORY_MB: 2}}
        expected_rps = ['rp_1', 'rp_2']
        self._run_get_all_by_filters(expected_rps, filters=filters)

        # Case 2: amount is less than min_unit
        filters = {'resources': {orc.MEMORY_MB: 1}}
        expected_rps = []
        self._run_get_all_by_filters(expected_rps, filters=filters)

        # Case 3: amount is more than max_unit
        filters = {'resources': {orc.MEMORY_MB: 5}}
        expected_rps = []
        self._run_get_all_by_filters(expected_rps, filters=filters)

        # Case 4: amount is not modulo step_size
        filters = {'resources': {orc.MEMORY_MB: 3}}
        expected_rps = []
        self._run_get_all_by_filters(expected_rps, filters=filters)
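
    # Not part of the original tests: the four MEMORY_MB cases above all come
    # down to this per-inventory constraint, sketched as a plain helper. With
    # min_unit=2, max_unit=4 and step_size=2 it is True only for case 1
    # (amount=2) and False for amounts 1, 5 and 3.
    @staticmethod
    def _amount_fits(amount, min_unit, max_unit, step_size):
        return min_unit <= amount <= max_unit and amount % step_size == 0
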
    def test_nested_providers(self):
        """Create a hierarchy of resource providers and run through a series of
        tests that ensure one cannot delete a resource provider that has no
        direct allocations but its child providers do have allocations.
        """
        root_rp = self._create_provider('root_rp')
        child_rp = self._create_provider('child_rp',
                                         parent=uuidsentinel.root_rp)
        grandchild_rp = self._create_provider('grandchild_rp',
                                              parent=uuidsentinel.child_rp)

        # Verify that the root_provider_uuid of both the child and the
        # grandchild is the UUID of the root provider
        self.assertEqual(root_rp.uuid, child_rp.root_provider_uuid)
        self.assertEqual(root_rp.uuid, grandchild_rp.root_provider_uuid)

        # Create some inventory in the grandchild, allocate some consumers to
        # the grandchild and then attempt to delete the root provider and child
        # provider, both of which should fail.
        tb.add_inventory(grandchild_rp, orc.VCPU, 1)

        # Check all providers returned when getting by root UUID
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'in_tree': uuidsentinel.root_rp,
            }
        )
        self.assertEqual(3, len(rps))

        # Check all providers returned when getting by child UUID
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'in_tree': uuidsentinel.child_rp,
            }
        )
        self.assertEqual(3, len(rps))

        # Check all providers returned when getting by grandchild UUID
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'in_tree': uuidsentinel.grandchild_rp,
            }
        )
        self.assertEqual(3, len(rps))

        # Make sure that the member_of and uuid filters work with the in_tree
        # filter

        # No aggregate associations yet, so expect no records when adding a
        # member_of filter
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'member_of': [[uuidsentinel.agg]],
                'in_tree': uuidsentinel.grandchild_rp,
            }
        )
        self.assertEqual(0, len(rps))

        # OK, associate the grandchild with an aggregate and verify that ONLY
        # the grandchild is returned when asking for the grandchild's tree
        # along with the aggregate as member_of
        grandchild_rp.set_aggregates([uuidsentinel.agg])
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'member_of': [[uuidsentinel.agg]],
                'in_tree': uuidsentinel.grandchild_rp,
            }
        )
        self.assertEqual(1, len(rps))
        self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid)

        # Try filtering on an unknown UUID and verify no results
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'uuid': uuidsentinel.unknown_rp,
                'in_tree': uuidsentinel.grandchild_rp,
            }
        )
        self.assertEqual(0, len(rps))

        # And now check that filtering for just the child's UUID along with the
        # tree produces just a single provider (the child)
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'uuid': uuidsentinel.child_rp,
                'in_tree': uuidsentinel.grandchild_rp,
            }
        )
        self.assertEqual(1, len(rps))
        self.assertEqual(uuidsentinel.child_rp, rps[0].uuid)

        # Ensure that the resources filter also continues to work properly with
        # the in_tree filter. Request resources that none of the providers
        # currently have and ensure no providers are returned
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'in_tree': uuidsentinel.grandchild_rp,
                'resources': {
                    'VCPU': 200,
                }
            }
        )
        self.assertEqual(0, len(rps))

        # And now ask for one VCPU, which should only return us the grandchild
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'in_tree': uuidsentinel.grandchild_rp,
                'resources': {
                    'VCPU': 1,
                }
            }
        )
        self.assertEqual(1, len(rps))
        self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid)

        # Finally, verify we still get the grandchild if filtering on the
        # parent's UUID as in_tree
        rps = rp_obj.get_all_by_filters(
            self.ctx,
            filters={
                'in_tree': uuidsentinel.child_rp,
                'resources': {
                    'VCPU': 1,
                }
            }
        )
        self.assertEqual(1, len(rps))
        self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid)

        alloc_list = self.allocate_from_provider(
            grandchild_rp, orc.VCPU, 1)

        self.assertRaises(exception.CannotDeleteParentResourceProvider,
                          root_rp.destroy)
        self.assertRaises(exception.CannotDeleteParentResourceProvider,
                          child_rp.destroy)

        # Cannot delete provider if it has allocations
        self.assertRaises(exception.ResourceProviderInUse,
                          grandchild_rp.destroy)

        # Now remove the allocations against the grandchild and check that we
        # can delete the providers, starting from the leaf
        alloc_obj.delete_all(self.ctx, alloc_list)
        grandchild_rp.destroy()
        child_rp.destroy()
        root_rp.destroy()

    def test_provider_modify_inventory(self):
        rp = self._create_provider(uuidsentinel.rp_name)
        saved_generation = rp.generation

        disk_inv = tb.add_inventory(rp, orc.DISK_GB, 1024,
                                    reserved=15,
                                    min_unit=10,
                                    max_unit=100,
                                    step_size=10)

        vcpu_inv = tb.add_inventory(rp, orc.VCPU, 12,
                                    allocation_ratio=16.0)

        # generation has bumped once for each add
        self.assertEqual(saved_generation + 2, rp.generation)
        saved_generation = rp.generation

        new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
        self.assertEqual(2, len(new_inv_list))
        resource_classes = [inv.resource_class for inv in new_inv_list]
        self.assertIn(orc.VCPU, resource_classes)
        self.assertIn(orc.DISK_GB, resource_classes)

        # reset inventory to just disk_inv
        rp.set_inventory([disk_inv])

        # generation has bumped
        self.assertEqual(saved_generation + 1, rp.generation)
        saved_generation = rp.generation

        new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
        self.assertEqual(1, len(new_inv_list))
        resource_classes = [inv.resource_class for inv in new_inv_list]
        self.assertNotIn(orc.VCPU, resource_classes)
        self.assertIn(orc.DISK_GB, resource_classes)
        self.assertEqual(1024, new_inv_list[0].total)

        # update existing disk inv to new settings
        disk_inv = inv_obj.Inventory(
            resource_provider=rp,
            resource_class=orc.DISK_GB,
            total=2048,
            reserved=15,
            min_unit=10,
            max_unit=100,
            step_size=10,
            allocation_ratio=1.0)
        rp.update_inventory(disk_inv)

        # generation has bumped
        self.assertEqual(saved_generation + 1, rp.generation)
        saved_generation = rp.generation

        new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
        self.assertEqual(1, len(new_inv_list))
        self.assertEqual(2048, new_inv_list[0].total)

        # delete inventory
        rp.delete_inventory(orc.DISK_GB)

        # generation has bumped
        self.assertEqual(saved_generation + 1, rp.generation)
        saved_generation = rp.generation

        new_inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
        result = inv_obj.find(new_inv_list, orc.DISK_GB)
        self.assertIsNone(result)
        self.assertRaises(exception.NotFound, rp.delete_inventory,
                          orc.DISK_GB)

        # check inventory list is empty
        inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
        self.assertEqual(0, len(inv_list))

        # add some inventory
        rp.add_inventory(vcpu_inv)
        inv_list = inv_obj.get_all_by_resource_provider(self.ctx, rp)
        self.assertEqual(1, len(inv_list))

        # generation has bumped
        self.assertEqual(saved_generation + 1, rp.generation)
        saved_generation = rp.generation

        # add same inventory again
        self.assertRaises(db_exc.DBDuplicateEntry,
                          rp.add_inventory, vcpu_inv)

        # generation has not bumped
        self.assertEqual(saved_generation, rp.generation)

        # fail when generation wrong
        rp.generation = rp.generation - 1
        self.assertRaises(exception.ConcurrentUpdateDetected,
                          rp.set_inventory, inv_list)