Example 1
    def start_fixture(self):
        super(AllocationFixture, self).start_fixture()

        # For use creating and querying allocations/usages
        os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
        project_id = os.environ['PROJECT_ID']
        user_id = os.environ['USER_ID']
        alt_user_id = os.environ['ALT_USER_ID']

        user = user_obj.User(self.context, external_id=user_id)
        user.create()
        alt_user = user_obj.User(self.context, external_id=alt_user_id)
        alt_user.create()
        project = project_obj.Project(self.context, external_id=project_id)
        project.create()

        # Stealing from the super
        rp_name = os.environ['RP_NAME']
        rp_uuid = os.environ['RP_UUID']
        # Create the rp with VCPU and DISK_GB inventory
        rp = tb.create_provider(self.context, rp_name, uuid=rp_uuid)
        tb.add_inventory(rp,
                         'DISK_GB',
                         2048,
                         step_size=10,
                         min_unit=10,
                         max_unit=1000)
        tb.add_inventory(rp, 'VCPU', 10, max_unit=10)

        # Create a first consumer for the DISK_GB allocations
        consumer1 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer1, {'DISK_GB': 1000})
        os.environ['CONSUMER_0'] = consumer1.uuid

        # Create a second consumer for the VCPU allocations
        consumer2 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer2, {'VCPU': 6})
        os.environ['CONSUMER_ID'] = consumer2.uuid

        # Create a consumer object for a different user
        alt_consumer = tb.ensure_consumer(self.context, alt_user, project)
        os.environ['ALT_CONSUMER_ID'] = alt_consumer.uuid

        # Create a couple of allocations for a different user.
        tb.set_allocation(self.context, rp, alt_consumer, {
            'DISK_GB': 20,
            'VCPU': 1
        })

        # The ALT_RP_XXX variables are for a resource provider that has
        # not been created in the Allocation fixture
        os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
        os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
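
Taken together, the fixture leaves the provider almost fully allocated. The following is a short sanity-check sketch (not part of the fixture), assuming the usual placement rule that usage is the per-resource-class sum of allocated amounts:

    # Sketch only: usage totals implied by the allocations created above.
    expected_usage = {
        'DISK_GB': 1000 + 20,  # CONSUMER_0 plus the alternate consumer = 1020 of 2048
        'VCPU': 6 + 1,         # CONSUMER_ID plus the alternate consumer = 7 of 10
    }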
Example 2
    def start_fixture(self):
        super(AllocationFixture, self).start_fixture()

        # For use creating and querying allocations/usages
        os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
        project_id = os.environ['PROJECT_ID']
        user_id = os.environ['USER_ID']
        alt_user_id = os.environ['ALT_USER_ID']

        user = user_obj.User(self.context, external_id=user_id)
        user.create()
        alt_user = user_obj.User(self.context, external_id=alt_user_id)
        alt_user.create()
        project = project_obj.Project(self.context, external_id=project_id)
        project.create()

        # Stealing from the super
        rp_name = os.environ['RP_NAME']
        rp_uuid = os.environ['RP_UUID']
        # Create the rp with VCPU and DISK_GB inventory
        rp = tb.create_provider(self.context, rp_name, uuid=rp_uuid)
        tb.add_inventory(rp, 'DISK_GB', 2048,
                         step_size=10, min_unit=10, max_unit=1000)
        tb.add_inventory(rp, 'VCPU', 10, max_unit=10)

        # Create a first consumer for the DISK_GB allocations
        consumer1 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer1, {'DISK_GB': 1000})
        os.environ['CONSUMER_0'] = consumer1.uuid

        # Create a second consumer for the VCPU allocations
        consumer2 = tb.ensure_consumer(self.context, user, project)
        tb.set_allocation(self.context, rp, consumer2, {'VCPU': 6})
        # This consumer is referenced from the gabbits
        os.environ['CONSUMER_ID'] = consumer2.uuid

        # Create a consumer object for a different user
        alt_consumer = tb.ensure_consumer(self.context, alt_user, project)
        os.environ['ALT_CONSUMER_ID'] = alt_consumer.uuid

        # Create a couple of allocations for a different user.
        tb.set_allocation(self.context, rp, alt_consumer,
                          {'DISK_GB': 20, 'VCPU': 1})

        # The ALT_RP_XXX variables are for a resource provider that has
        # not been created in the Allocation fixture
        os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
        os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
Example 3
    def start_fixture(self):
        super(SharedStorageFixture, self).start_fixture()
        self.context = context.get_admin_context()

        agg_uuid = uuidutils.generate_uuid()

        cn1 = tb.create_provider(self.context, 'cn1', agg_uuid)
        cn2 = tb.create_provider(self.context, 'cn2', agg_uuid)
        ss = tb.create_provider(self.context, 'ss', agg_uuid)

        numa1_1 = tb.create_provider(self.context, 'numa1_1', parent=cn1.uuid)
        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)

        pf1_1 = tb.create_provider(self.context, 'pf1_1', parent=numa1_1.uuid)
        pf1_2 = tb.create_provider(self.context, 'pf1_2', parent=numa1_2.uuid)
        pf2_1 = tb.create_provider(self.context, 'pf2_1', parent=numa2_1.uuid)
        pf2_2 = tb.create_provider(self.context, 'pf2_2', parent=numa2_2.uuid)

        os.environ['AGG_UUID'] = agg_uuid

        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid
        os.environ['SS_UUID'] = ss.uuid

        os.environ['NUMA1_1_UUID'] = numa1_1.uuid
        os.environ['NUMA1_2_UUID'] = numa1_2.uuid
        os.environ['NUMA2_1_UUID'] = numa2_1.uuid
        os.environ['NUMA2_2_UUID'] = numa2_2.uuid

        os.environ['PF1_1_UUID'] = pf1_1.uuid
        os.environ['PF1_2_UUID'] = pf1_2.uuid
        os.environ['PF2_1_UUID'] = pf2_1.uuid
        os.environ['PF2_2_UUID'] = pf2_2.uuid

        # Populate compute node inventory for VCPU and RAM
        for cn in (cn1, cn2):
            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 128 * 1024,
                             allocation_ratio=1.5)
        tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')

        # Populate shared storage provider with DISK_GB inventory and
        # mark it shared among any provider associated via aggregate
        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
                         reserved=100, allocation_ratio=1.0)
        tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')

        # Populate PF inventory for VF
        for pf in (pf1_1, pf1_2, pf2_1, pf2_2):
            tb.add_inventory(pf, fields.ResourceClass.SRIOV_NET_VF,
                             8, allocation_ratio=1.0)
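
For reference, placement derives the capacity of each inventory as (total - reserved) * allocation_ratio, so the inventories above imply the effective capacities sketched below. This is a worked example, not code from the fixture, and it assumes tb.add_inventory() defaults reserved to 0:

    # Sketch only: effective capacities implied by the inventories above,
    # using capacity = (total - reserved) * allocation_ratio.
    cn_vcpu_capacity = (24 - 0) * 16.0        # 384.0 VCPU per compute node
    cn_ram_capacity = (128 * 1024 - 0) * 1.5  # 196608.0 MEMORY_MB per compute node
    ss_disk_capacity = (2000 - 100) * 1.0     # 1900.0 DISK_GB on the shared provider
    pf_vf_capacity = (8 - 0) * 1.0            # 8.0 SRIOV_NET_VF per physical function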
Example 4
    def start_fixture(self):
        super(SharedStorageFixture, self).start_fixture()
        self.context = context.get_admin_context()

        agg_uuid = uuidutils.generate_uuid()

        cn1 = tb.create_provider(self.context, 'cn1', agg_uuid)
        cn2 = tb.create_provider(self.context, 'cn2', agg_uuid)
        ss = tb.create_provider(self.context, 'ss', agg_uuid)

        numa1_1 = tb.create_provider(self.context, 'numa1_1', parent=cn1.uuid)
        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)

        pf1_1 = tb.create_provider(self.context, 'pf1_1', parent=numa1_1.uuid)
        pf1_2 = tb.create_provider(self.context, 'pf1_2', parent=numa1_2.uuid)
        pf2_1 = tb.create_provider(self.context, 'pf2_1', parent=numa2_1.uuid)
        pf2_2 = tb.create_provider(self.context, 'pf2_2', parent=numa2_2.uuid)

        os.environ['AGG_UUID'] = agg_uuid

        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid
        os.environ['SS_UUID'] = ss.uuid

        os.environ['NUMA1_1_UUID'] = numa1_1.uuid
        os.environ['NUMA1_2_UUID'] = numa1_2.uuid
        os.environ['NUMA2_1_UUID'] = numa2_1.uuid
        os.environ['NUMA2_2_UUID'] = numa2_2.uuid

        os.environ['PF1_1_UUID'] = pf1_1.uuid
        os.environ['PF1_2_UUID'] = pf1_2.uuid
        os.environ['PF2_1_UUID'] = pf2_1.uuid
        os.environ['PF2_2_UUID'] = pf2_2.uuid

        # Populate compute node inventory for VCPU and RAM
        for cn in (cn1, cn2):
            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
                             allocation_ratio=16.0)
            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 128 * 1024,
                             allocation_ratio=1.5)
        tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')

        # Populate shared storage provider with DISK_GB inventory and
        # mark it shared among any provider associated via aggregate
        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
                         reserved=100, allocation_ratio=1.0)
        tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')

        # Populate PF inventory for VF
        for pf in (pf1_1, pf1_2, pf2_1, pf2_2):
            tb.add_inventory(pf, fields.ResourceClass.SRIOV_NET_VF,
                             8, allocation_ratio=1.0)
Example 5
    def start_fixture(self):
        super(NonSharedStorageFixture, self).start_fixture()

        aggA_uuid = uuidutils.generate_uuid()
        aggB_uuid = uuidutils.generate_uuid()
        aggC_uuid = uuidutils.generate_uuid()
        os.environ['AGGA_UUID'] = aggA_uuid
        os.environ['AGGB_UUID'] = aggB_uuid
        os.environ['AGGC_UUID'] = aggC_uuid

        cn1 = tb.create_provider(self.context, 'cn1')
        cn2 = tb.create_provider(self.context, 'cn2')

        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid

        # Populate compute node inventory for VCPU, RAM and DISK
        for cn in (cn1, cn2):
            tb.add_inventory(cn, 'VCPU', 24)
            tb.add_inventory(cn, 'MEMORY_MB', 128 * 1024)
            tb.add_inventory(cn, 'DISK_GB', 2000)
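
Note that this fixture only generates the AGGA/AGGB/AGGC uuids; it does not associate them with either compute node, so tests that need the association have to create it themselves. A minimal sketch, assuming the ResourceProvider.set_aggregates() method of this object layer:

    # Sketch only (not part of the fixture): associate cn1 with aggregate A.
    # Assumes ResourceProvider.set_aggregates() is available on the provider.
    cn1.set_aggregates([aggA_uuid])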
Example 6
    def start_fixture(self):
        super(NonSharedStorageFixture, self).start_fixture()

        aggA_uuid = uuidutils.generate_uuid()
        aggB_uuid = uuidutils.generate_uuid()
        aggC_uuid = uuidutils.generate_uuid()
        os.environ['AGGA_UUID'] = aggA_uuid
        os.environ['AGGB_UUID'] = aggB_uuid
        os.environ['AGGC_UUID'] = aggC_uuid

        cn1 = tb.create_provider(self.context, 'cn1')
        cn2 = tb.create_provider(self.context, 'cn2')

        os.environ['CN1_UUID'] = cn1.uuid
        os.environ['CN2_UUID'] = cn2.uuid

        # Populate compute node inventory for VCPU, RAM and DISK
        for cn in (cn1, cn2):
            tb.add_inventory(cn, 'VCPU', 24)
            tb.add_inventory(cn, 'MEMORY_MB', 128 * 1024)
            tb.add_inventory(cn, 'DISK_GB', 2000)
Example 7
    def start_fixture(self):
        super(GranularFixture, self).start_fixture()

        rp_obj.ResourceClass(context=self.context,
                             name='CUSTOM_NET_MBPS').create()

        os.environ['AGGA'] = uuids.aggA
        os.environ['AGGB'] = uuids.aggB
        os.environ['AGGC'] = uuids.aggC

        cn_left = tb.create_provider(self.context, 'cn_left', uuids.aggA)
        os.environ['CN_LEFT'] = cn_left.uuid
        tb.add_inventory(cn_left, 'VCPU', 8)
        tb.add_inventory(cn_left, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_left, 'DISK_GB', 500)
        tb.add_inventory(cn_left, 'VGPU', 8)
        tb.add_inventory(cn_left, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_left, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_left, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_GPU_API_DXVA', 'HW_NIC_DCB_PFC', 'CUSTOM_FOO')

        cn_middle = tb.create_provider(self.context, 'cn_middle', uuids.aggA,
                                       uuids.aggB)
        os.environ['CN_MIDDLE'] = cn_middle.uuid
        tb.add_inventory(cn_middle, 'VCPU', 8)
        tb.add_inventory(cn_middle, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_middle, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_middle, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_middle, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_CPU_X86_SSE', 'HW_NIC_ACCEL_TLS')

        cn_right = tb.create_provider(self.context, 'cn_right', uuids.aggB,
                                      uuids.aggC)
        os.environ['CN_RIGHT'] = cn_right.uuid
        tb.add_inventory(cn_right, 'VCPU', 8)
        tb.add_inventory(cn_right, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_right, 'DISK_GB', 500)
        tb.add_inventory(cn_right, 'VGPU', 8, max_unit=2)
        tb.set_traits(cn_right, 'HW_CPU_X86_MMX', 'HW_GPU_API_DXVA',
                      'CUSTOM_DISK_SSD')

        shr_disk_1 = tb.create_provider(self.context, 'shr_disk_1', uuids.aggA)
        os.environ['SHR_DISK_1'] = shr_disk_1.uuid
        tb.add_inventory(shr_disk_1, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_1, 'MISC_SHARES_VIA_AGGREGATE',
                      'CUSTOM_DISK_SSD')

        shr_disk_2 = tb.create_provider(self.context, 'shr_disk_2', uuids.aggA,
                                        uuids.aggB)
        os.environ['SHR_DISK_2'] = shr_disk_2.uuid
        tb.add_inventory(shr_disk_2, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_2, 'MISC_SHARES_VIA_AGGREGATE')

        shr_net = tb.create_provider(self.context, 'shr_net', uuids.aggC)
        os.environ['SHR_NET'] = shr_net.uuid
        tb.add_inventory(shr_net, 'SRIOV_NET_VF', 16)
        tb.add_inventory(shr_net, 'CUSTOM_NET_MBPS', 40000)
        tb.set_traits(shr_net, 'MISC_SHARES_VIA_AGGREGATE')
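
As a rough illustration of what this fixture is built to serve (assuming the numbered-group query syntax of GET /allocation_candidates introduced in placement microversion 1.25), a granular request against these providers might carry query parameters such as:

    # Sketch only: one possible granular allocation-candidates query.
    # Group 1 asks for compute resources with an AVX trait; group 2 asks for
    # SSD-backed disk, which only cn_right or shr_disk_1 can satisfy.
    query = {
        'resources1': 'VCPU:1,MEMORY_MB:1024',
        'required1': 'HW_CPU_X86_AVX',
        'resources2': 'DISK_GB:100',
        'required2': 'CUSTOM_DISK_SSD',
        'group_policy': 'isolate',
    }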
Example 8
    def test_reshape_concurrent_inventory_update(self):
        """Valid failure scenario for reshape(). We test a situation where the
        virt driver has constructed its "after inventories and allocations"
        and sent those to the POST /reshaper endpoint. The reshape POST handler
        does a quick check of the resource provider generations sent in the
        payload and they all check out.

        However, right before the call to resource_provider.reshape(), another
        thread legitimately changes the inventory of one of the providers
        involved in the reshape transaction. We should get a
        ConcurrentUpdateDetected in this case.
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(self.ctx,
                                            uuid=i1_uuid,
                                            user=self.user_obj,
                                            project=self.project_obj)
        i1_consumer.create()

        # then all our original providers
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate an instance on our compute node
        allocs = [
            rp_obj.Allocation(self.ctx,
                              resource_provider=cn1,
                              resource_class='VCPU',
                              consumer=i1_consumer,
                              used=2),
            rp_obj.Allocation(self.ctx,
                              resource_provider=cn1,
                              resource_class='MEMORY_MB',
                              consumer=i1_consumer,
                              used=1024),
            rp_obj.Allocation(self.ctx,
                              resource_provider=cn1,
                              resource_class='DISK_GB',
                              consumer=i1_consumer,
                              used=100),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(
                                         self.ctx,
                                         resource_provider=cn1,
                                         resource_class='MEMORY_MB',
                                         total=32768,
                                         reserved=0,
                                         max_unit=32768,
                                         min_unit=1,
                                         step_size=1,
                                         allocation_ratio=1.0),
                                 ]),
            # each NUMA node gets half of the CPUs
            cn1_numa0:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(
                                         self.ctx,
                                         resource_provider=cn1_numa0,
                                         resource_class='VCPU',
                                         total=8,
                                         reserved=0,
                                         max_unit=8,
                                         min_unit=1,
                                         step_size=1,
                                         allocation_ratio=1.0),
                                 ]),
            cn1_numa1:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(
                                         self.ctx,
                                         resource_provider=cn1_numa1,
                                         resource_class='VCPU',
                                         total=8,
                                         reserved=0,
                                         max_unit=8,
                                         min_unit=1,
                                         step_size=1,
                                         allocation_ratio=1.0),
                                 ]),
            # The sharing provider gets a bunch of disk
            ss:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(self.ctx,
                                                      resource_provider=ss,
                                                      resource_class='DISK_GB',
                                                      total=100000,
                                                      reserved=0,
                                                      max_unit=1000,
                                                      min_unit=1,
                                                      step_size=1,
                                                      allocation_ratio=1.0),
                                 ]),
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        after_allocs = rp_obj.AllocationList(
            self.ctx,
            objects=[
                # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
                # from the sharing storage provider
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1_numa0,
                                  resource_class='VCPU',
                                  consumer=i1_consumer,
                                  used=2),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1,
                                  resource_class='MEMORY_MB',
                                  consumer=i1_consumer,
                                  used=1024),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=ss,
                                  resource_class='DISK_GB',
                                  consumer=i1_consumer,
                                  used=100),
            ])

        # OK, now before we call reshape(), here we emulate another thread
        # changing the inventory for the sharing storage provider in between
        # the time in the REST handler when the sharing storage provider's
        # generation was validated and the actual call to reshape()
        ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
        # Reduce the amount of storage to 2000, from 100000.
        new_ss_inv = rp_obj.InventoryList(self.ctx,
                                          objects=[
                                              rp_obj.Inventory(
                                                  self.ctx,
                                                  resource_provider=ss_threadB,
                                                  resource_class='DISK_GB',
                                                  total=2000,
                                                  reserved=0,
                                                  max_unit=1000,
                                                  min_unit=1,
                                                  step_size=1,
                                                  allocation_ratio=1.0)
                                          ])
        ss_threadB.set_inventory(new_ss_inv)
        # Double check our storage provider's generation is now greater than
        # the original storage provider record being sent to reshape()
        self.assertGreater(ss_threadB.generation, ss.generation)

        # And we should legitimately get a failure now to reshape() due to
        # another thread updating one of the involved provider's generations
        self.assertRaises(exception.ConcurrentUpdateDetected, rp_obj.reshape,
                          self.ctx, after_inventories, after_allocs)
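
In production code the caller is expected to react to ConcurrentUpdateDetected by re-reading the providers (picking up their new generations) and resubmitting. A minimal retry sketch under that assumption, with illustrative builder callables that are not part of the test:

    # Sketch only: retry reshape() when a provider generation changes under us.
    # build_inventories/build_allocs are assumed to re-read the providers so
    # the rebuilt payload carries fresh generations.
    def _reshape_with_retry(ctx, build_inventories, build_allocs, attempts=3):
        for attempt in range(attempts):
            try:
                rp_obj.reshape(ctx, build_inventories(ctx), build_allocs(ctx))
                return
            except exception.ConcurrentUpdateDetected:
                if attempt == attempts - 1:
                    raise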
Example 9
    def test_reshape(self):
        """We set up the following scenario:

        BEFORE: single compute node setup

          A single compute node with:
            - VCPU, MEMORY_MB, DISK_GB inventory
            - Two instances consuming CPU, RAM and DISK from that compute node

        AFTER: hierarchical + shared storage setup

          A compute node parent provider with:
            - MEMORY_MB
          Two NUMA node child providers containing:
            - VCPU
          Shared storage provider with:
            - DISK_GB
          Both instances have their resources split among the providers and
          shared storage accordingly
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(self.ctx,
                                            uuid=i1_uuid,
                                            user=self.user_obj,
                                            project=self.project_obj)
        i1_consumer.create()

        i2_uuid = uuids.instance2
        i2_consumer = consumer_obj.Consumer(self.ctx,
                                            uuid=i2_uuid,
                                            user=self.user_obj,
                                            project=self.project_obj)
        i2_consumer.create()

        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate both instances against the single compute node
        for consumer in (i1_consumer, i2_consumer):
            allocs = [
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1,
                                  resource_class='VCPU',
                                  consumer=consumer,
                                  used=2),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1,
                                  resource_class='MEMORY_MB',
                                  consumer=consumer,
                                  used=1024),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1,
                                  resource_class='DISK_GB',
                                  consumer=consumer,
                                  used=100),
            ]
            alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
            alloc_list.replace_all()

        # Verify we have the allocations we expect for the BEFORE scenario
        before_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i1_uuid)
        self.assertEqual(3, len(before_allocs_i1))
        self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
        before_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i2_uuid)
        self.assertEqual(3, len(before_allocs_i2))
        self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(
                                         self.ctx,
                                         resource_provider=cn1,
                                         resource_class='MEMORY_MB',
                                         total=32768,
                                         reserved=0,
                                         max_unit=32768,
                                         min_unit=1,
                                         step_size=1,
                                         allocation_ratio=1.0),
                                 ]),
            # each NUMA node gets half of the CPUs
            cn1_numa0:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(
                                         self.ctx,
                                         resource_provider=cn1_numa0,
                                         resource_class='VCPU',
                                         total=8,
                                         reserved=0,
                                         max_unit=8,
                                         min_unit=1,
                                         step_size=1,
                                         allocation_ratio=1.0),
                                 ]),
            cn1_numa1:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(
                                         self.ctx,
                                         resource_provider=cn1_numa1,
                                         resource_class='VCPU',
                                         total=8,
                                         reserved=0,
                                         max_unit=8,
                                         min_unit=1,
                                         step_size=1,
                                         allocation_ratio=1.0),
                                 ]),
            # The sharing provider gets a bunch of disk
            ss:
            rp_obj.InventoryList(self.ctx,
                                 objects=[
                                     rp_obj.Inventory(self.ctx,
                                                      resource_provider=ss,
                                                      resource_class='DISK_GB',
                                                      total=100000,
                                                      reserved=0,
                                                      max_unit=1000,
                                                      min_unit=1,
                                                      step_size=1,
                                                      allocation_ratio=1.0),
                                 ]),
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)
        after_allocs = rp_obj.AllocationList(
            self.ctx,
            objects=[
                # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
                # from the sharing storage provider
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1_numa0,
                                  resource_class='VCPU',
                                  consumer=i1_consumer,
                                  used=2),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1,
                                  resource_class='MEMORY_MB',
                                  consumer=i1_consumer,
                                  used=1024),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=ss,
                                  resource_class='DISK_GB',
                                  consumer=i1_consumer,
                                  used=100),
                # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
                # from the sharing storage provider
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1_numa1,
                                  resource_class='VCPU',
                                  consumer=i2_consumer,
                                  used=2),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=cn1,
                                  resource_class='MEMORY_MB',
                                  consumer=i2_consumer,
                                  used=1024),
                rp_obj.Allocation(self.ctx,
                                  resource_provider=ss,
                                  resource_class='DISK_GB',
                                  consumer=i2_consumer,
                                  used=100),
            ])
        rp_obj.reshape(self.ctx, after_inventories, after_allocs)

        # Verify that the inventories have been moved to the appropriate
        # providers in the AFTER scenario

        # The root compute node should only have MEMORY_MB, nothing else
        cn1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1)
        self.assertEqual(1, len(cn1_inv))
        self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
        self.assertEqual(32768, cn1_inv[0].total)
        # Each NUMA node should only have half the original VCPU, nothing else
        numa0_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1_numa0)
        self.assertEqual(1, len(numa0_inv))
        self.assertEqual('VCPU', numa0_inv[0].resource_class)
        self.assertEqual(8, numa0_inv[0].total)
        numa1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1_numa1)
        self.assertEqual(1, len(numa1_inv))
        self.assertEqual('VCPU', numa1_inv[0].resource_class)
        self.assertEqual(8, numa1_inv[0].total)
        # The sharing storage provider should only have DISK_GB, nothing else
        ss_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, ss)
        self.assertEqual(1, len(ss_inv))
        self.assertEqual('DISK_GB', ss_inv[0].resource_class)
        self.assertEqual(100000, ss_inv[0].total)

        # Verify we have the allocations we expect for the AFTER scenario
        after_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i1_uuid)
        self.assertEqual(3, len(after_allocs_i1))
        # Our VCPU allocation should be in the NUMA0 node
        vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

        after_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i2_uuid)
        self.assertEqual(3, len(after_allocs_i2))
        # Our VCPU allocation should be in the NUMA1 node
        vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
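
The assertions above rely on an alloc_for_rc() helper that is not shown in this excerpt; it simply picks the allocation for a given resource class out of an AllocationList. A minimal sketch of such a helper (the real module-level implementation may differ):

    # Sketch only: return the allocation matching a resource class, or None.
    def alloc_for_rc(alloc_list, rc):
        for alloc in alloc_list:
            if alloc.resource_class == rc:
                return alloc
        return None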
Example 10
    def test_delete_consumer_if_no_allocs(self):
        """AllocationList.replace_all() should attempt to delete consumers that
        no longer have any allocations. Due to the REST API not having any way
        to query for consumers directly (only via the GET
        /allocations/{consumer_uuid} endpoint which returns an empty dict even
        when no consumer record exists for the {consumer_uuid}) we need to do
        this functional test using only the object layer.
        """
        # We will use two consumers in this test, only one of which will get
        # all of its allocations deleted in a transaction (and we expect that
        # consumer record to be deleted)
        c1 = consumer_obj.Consumer(
            self.ctx, uuid=uuids.consumer1, user=self.user_obj,
            project=self.project_obj)
        c1.create()
        c2 = consumer_obj.Consumer(
            self.ctx, uuid=uuids.consumer2, user=self.user_obj,
            project=self.project_obj)
        c2.create()

        # Create some inventory that we will allocate
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8)
        tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
        tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)

        # Now allocate some of that inventory to two different consumers
        allocs = [
            rp_obj.Allocation(
                self.ctx, consumer=c1, resource_provider=cn1,
                resource_class=fields.ResourceClass.VCPU, used=1),
            rp_obj.Allocation(
                self.ctx, consumer=c1, resource_provider=cn1,
                resource_class=fields.ResourceClass.MEMORY_MB, used=512),
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.VCPU, used=1),
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.MEMORY_MB, used=512),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # Validate that we have consumer records for both consumers
        for c_uuid in (uuids.consumer1, uuids.consumer2):
            c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
            self.assertIsNotNone(c_obj)

        # OK, now "remove" the allocation for consumer2 by setting the used
        # value for both allocated resources to 0 and re-running the
        # AllocationList.replace_all(). This should end up deleting the
        # consumer record for consumer2
        allocs = [
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.VCPU, used=0),
            rp_obj.Allocation(
                self.ctx, consumer=c2, resource_provider=cn1,
                resource_class=fields.ResourceClass.MEMORY_MB, used=0),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # consumer1 should still exist...
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
        self.assertIsNotNone(c_obj)

        # but not consumer2...
        self.assertRaises(
            exception.NotFound, consumer_obj.Consumer.get_by_uuid,
            self.ctx, uuids.consumer2)

        # DELETE /allocations/{consumer_uuid} is the other place where we
        # delete all allocations for a consumer. Let's delete all for consumer1
        # and check that the consumer record is deleted
        alloc_list = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, uuids.consumer1)
        alloc_list.delete_all()

        # consumer1 should no longer exist in the DB since we just deleted all
        # of its allocations
        self.assertRaises(
            exception.NotFound, consumer_obj.Consumer.get_by_uuid,
            self.ctx, uuids.consumer1)
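
A usage query would also confirm that zeroing consumer2's allocations released the resources themselves; a hedged sketch, assuming a UsageList.get_all_by_resource_provider_uuid() accessor in the same object layer:

    # Sketch only: after the zeroing replace_all(), cn1's usage should reflect
    # only consumer1 (1 VCPU and 512 MEMORY_MB).
    usages = rp_obj.UsageList.get_all_by_resource_provider_uuid(
        self.ctx, cn1.uuid)
    usage_by_rc = {u.resource_class: u.usage for u in usages}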
Example 11
    def test_delete_consumer_if_no_allocs(self):
        """AllocationList.replace_all() should attempt to delete consumers that
        no longer have any allocations. Due to the REST API not having any way
        to query for consumers directly (only via the GET
        /allocations/{consumer_uuid} endpoint which returns an empty dict even
        when no consumer record exists for the {consumer_uuid}) we need to do
        this functional test using only the object layer.
        """
        # We will use two consumers in this test, only one of which will get
        # all of its allocations deleted in a transaction (and we expect that
        # consumer record to be deleted)
        c1 = consumer_obj.Consumer(self.ctx,
                                   uuid=uuids.consumer1,
                                   user=self.user_obj,
                                   project=self.project_obj)
        c1.create()
        c2 = consumer_obj.Consumer(self.ctx,
                                   uuid=uuids.consumer2,
                                   user=self.user_obj,
                                   project=self.project_obj)
        c2.create()

        # Create some inventory that we will allocate
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8)
        tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
        tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)

        # Now allocate some of that inventory to two different consumers
        allocs = [
            rp_obj.Allocation(self.ctx,
                              consumer=c1,
                              resource_provider=cn1,
                              resource_class=fields.ResourceClass.VCPU,
                              used=1),
            rp_obj.Allocation(self.ctx,
                              consumer=c1,
                              resource_provider=cn1,
                              resource_class=fields.ResourceClass.MEMORY_MB,
                              used=512),
            rp_obj.Allocation(self.ctx,
                              consumer=c2,
                              resource_provider=cn1,
                              resource_class=fields.ResourceClass.VCPU,
                              used=1),
            rp_obj.Allocation(self.ctx,
                              consumer=c2,
                              resource_provider=cn1,
                              resource_class=fields.ResourceClass.MEMORY_MB,
                              used=512),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # Validate that we have consumer records for both consumers
        for c_uuid in (uuids.consumer1, uuids.consumer2):
            c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid)
            self.assertIsNotNone(c_obj)

        # OK, now "remove" the allocation for consumer2 by setting the used
        # value for both allocated resources to 0 and re-running the
        # AllocationList.replace_all(). This should end up deleting the
        # consumer record for consumer2
        allocs = [
            rp_obj.Allocation(self.ctx,
                              consumer=c2,
                              resource_provider=cn1,
                              resource_class=fields.ResourceClass.VCPU,
                              used=0),
            rp_obj.Allocation(self.ctx,
                              consumer=c2,
                              resource_provider=cn1,
                              resource_class=fields.ResourceClass.MEMORY_MB,
                              used=0),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # consumer1 should still exist...
        c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1)
        self.assertIsNotNone(c_obj)

        # but not consumer2...
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer2)

        # DELETE /allocations/{consumer_uuid} is the other place where we
        # delete all allocations for a consumer. Let's delete all for consumer1
        # and check that the consumer record is deleted
        alloc_list = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, uuids.consumer1)
        alloc_list.delete_all()

        # consumer1 should no longer exist in the DB since we just deleted all
        # of its allocations
        self.assertRaises(exception.NotFound,
                          consumer_obj.Consumer.get_by_uuid, self.ctx,
                          uuids.consumer1)
Example 12
    def test_reshape(self):
        """We set up the following scenario:

        BEFORE: single compute node setup

          A single compute node with:
            - VCPU, MEMORY_MB, DISK_GB inventory
            - Two instances consuming CPU, RAM and DISK from that compute node

        AFTER: hierarchical + shared storage setup

          A compute node parent provider with:
            - MEMORY_MB
          Two NUMA node child providers containing:
            - VCPU
          Shared storage provider with:
            - DISK_GB
          Both instances have their resources split among the providers and
          shared storage accordingly
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()

        i2_uuid = uuids.instance2
        i2_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i2_uuid, user=self.user_obj,
            project=self.project_obj)
        i2_consumer.create()

        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate both instances against the single compute node
        for consumer in (i1_consumer, i2_consumer):
            allocs = [
                rp_obj.Allocation(
                    self.ctx, resource_provider=cn1,
                    resource_class='VCPU', consumer=consumer, used=2),
                rp_obj.Allocation(
                    self.ctx, resource_provider=cn1,
                    resource_class='MEMORY_MB', consumer=consumer, used=1024),
                rp_obj.Allocation(
                    self.ctx, resource_provider=cn1,
                    resource_class='DISK_GB', consumer=consumer, used=100),
            ]
            alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
            alloc_list.replace_all()

        # Verify we have the allocations we expect for the BEFORE scenario
        before_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i1_uuid)
        self.assertEqual(3, len(before_allocs_i1))
        self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid)
        before_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i2_uuid)
        self.assertEqual(3, len(before_allocs_i2))
        self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid)

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # each NUMA node gets half of the CPUs
            cn1_numa0.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            cn1_numa1.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # The sharing provider gets a bunch of disk
            ss.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid)
        after_allocs = rp_obj.AllocationList(self.ctx, objects=[
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
            # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1_numa1, resource_class='VCPU',
                consumer=i2_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i2_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=ss, resource_class='DISK_GB',
                consumer=i2_consumer, used=100),
        ])
        rp_obj.reshape(self.ctx, after_inventories, after_allocs)

        # Verify that the inventories have been moved to the appropriate
        # providers in the AFTER scenario

        # The root compute node should only have MEMORY_MB, nothing else
        cn1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1)
        self.assertEqual(1, len(cn1_inv))
        self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class)
        self.assertEqual(32768, cn1_inv[0].total)
        # Each NUMA node should only have half the original VCPU, nothing else
        numa0_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1_numa0)
        self.assertEqual(1, len(numa0_inv))
        self.assertEqual('VCPU', numa0_inv[0].resource_class)
        self.assertEqual(8, numa0_inv[0].total)
        numa1_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, cn1_numa1)
        self.assertEqual(1, len(numa1_inv))
        self.assertEqual('VCPU', numa1_inv[0].resource_class)
        self.assertEqual(8, numa1_inv[0].total)
        # The sharing storage provider should only have DISK_GB, nothing else
        ss_inv = rp_obj.InventoryList.get_all_by_resource_provider(
            self.ctx, ss)
        self.assertEqual(1, len(ss_inv))
        self.assertEqual('DISK_GB', ss_inv[0].resource_class)
        self.assertEqual(100000, ss_inv[0].total)

        # Verify we have the allocations we expect for the AFTER scenario
        after_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i1_uuid)
        self.assertEqual(3, len(after_allocs_i1))
        # Our VCPU allocation should be in the NUMA0 node
        vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)

        after_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id(
            self.ctx, i2_uuid)
        self.assertEqual(3, len(after_allocs_i2))
        # Our VCPU allocation should be in the NUMA1 node
        vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU')
        self.assertIsNotNone(vcpu_alloc)
        self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid)
        # Our DISK_GB allocation should be in the sharing provider
        disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB')
        self.assertIsNotNone(disk_alloc)
        self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid)
        # And our MEMORY_MB should remain on the root compute node
        ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB')
        self.assertIsNotNone(ram_alloc)
        self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid)
Example 13
    def test_reshape_concurrent_inventory_update(self):
        """Valid failure scenario for reshape(). We test a situation where the
        virt driver has constructed its "after inventories and allocations"
        and sent those to the POST /reshaper endpoint. The reshape POST handler
        does a quick check of the resource provider generations sent in the
        payload and they all check out.

        However, right before the call to resource_provider.reshape(), another
        thread legitimately changes the inventory of one of the providers
        involved in the reshape transaction. We should get a
        ConcurrentUpdateDetected in this case.
        """
        # First create our consumers
        i1_uuid = uuids.instance1
        i1_consumer = consumer_obj.Consumer(
            self.ctx, uuid=i1_uuid, user=self.user_obj,
            project=self.project_obj)
        i1_consumer.create()

        # then all our original providers
        cn1 = self._create_provider('cn1')
        tb.add_inventory(cn1, 'VCPU', 16)
        tb.add_inventory(cn1, 'MEMORY_MB', 32768)
        tb.add_inventory(cn1, 'DISK_GB', 1000)

        # Allocate an instance on our compute node
        allocs = [
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='VCPU', consumer=i1_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='MEMORY_MB', consumer=i1_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1,
                resource_class='DISK_GB', consumer=i1_consumer, used=100),
        ]
        alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs)
        alloc_list.replace_all()

        # Before we issue the actual reshape() call, we need to first create
        # the child providers and sharing storage provider. These are actions
        # that the virt driver or external agent is responsible for performing
        # *before* attempting any reshape activity.
        cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
        cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
        ss = self._create_provider('ss')

        # OK, now emulate the call to POST /reshaper that will be triggered by
        # a virt driver wanting to replace the world and change its modeling
        # from a single provider to a nested provider tree along with a sharing
        # storage provider.
        after_inventories = {
            # cn1 keeps the RAM only
            cn1.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1,
                    resource_class='MEMORY_MB', total=32768, reserved=0,
                    max_unit=32768, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # each NUMA node gets half of the CPUs
            cn1_numa0.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa0,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            cn1_numa1.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=cn1_numa1,
                    resource_class='VCPU', total=8, reserved=0,
                    max_unit=8, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
            # The sharing provider gets a bunch of disk
            ss.uuid: rp_obj.InventoryList(self.ctx, objects=[
                rp_obj.Inventory(
                    self.ctx, resource_provider=ss,
                    resource_class='DISK_GB', total=100000, reserved=0,
                    max_unit=1000, min_unit=1, step_size=1,
                    allocation_ratio=1.0),
            ]),
        }
        # We do a fetch from the DB for each instance to get its latest
        # generation. This would be done by the resource tracker or scheduler
        # report client before issuing the call to reshape() because the
        # consumers representing the two instances above will have had their
        # generations incremented in the original call to PUT
        # /allocations/{consumer_uuid}
        i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid)
        after_allocs = rp_obj.AllocationList(self.ctx, objects=[
            # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB
            # from the sharing storage provider
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1_numa0, resource_class='VCPU',
                consumer=i1_consumer, used=2),
            rp_obj.Allocation(
                self.ctx, resource_provider=cn1, resource_class='MEMORY_MB',
                consumer=i1_consumer, used=1024),
            rp_obj.Allocation(
                self.ctx, resource_provider=ss, resource_class='DISK_GB',
                consumer=i1_consumer, used=100),
        ])

        # OK, now before we call reshape(), here we emulate another thread
        # changing the inventory for the sharing storage provider in between
        # the time in the REST handler when the sharing storage provider's
        # generation was validated and the actual call to reshape()
        ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid)
        # Reduce the amount of storage to 2000, from 100000.
        new_ss_inv = rp_obj.InventoryList(self.ctx, objects=[
            rp_obj.Inventory(
                self.ctx, resource_provider=ss_threadB,
                resource_class='DISK_GB', total=2000, reserved=0,
                max_unit=1000, min_unit=1, step_size=1,
                allocation_ratio=1.0)])
        ss_threadB.set_inventory(new_ss_inv)
        # Double check our storage provider's generation is now greater than
        # the original storage provider record being sent to reshape()
        self.assertGreater(ss_threadB.generation, ss.generation)

        # And we should legitimately get a failure now to reshape() due to
        # another thread updating one of the involved provider's generations
        self.assertRaises(
            exception.ConcurrentUpdateDetected,
            rp_obj.reshape, self.ctx, after_inventories, after_allocs)
Example 14
    def start_fixture(self):
        super(GranularFixture, self).start_fixture()

        rp_obj.ResourceClass(
            context=self.context, name='CUSTOM_NET_MBPS').create()

        os.environ['AGGA'] = uuids.aggA
        os.environ['AGGB'] = uuids.aggB
        os.environ['AGGC'] = uuids.aggC

        cn_left = tb.create_provider(self.context, 'cn_left', uuids.aggA)
        os.environ['CN_LEFT'] = cn_left.uuid
        tb.add_inventory(cn_left, 'VCPU', 8)
        tb.add_inventory(cn_left, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_left, 'DISK_GB', 500)
        tb.add_inventory(cn_left, 'VGPU', 8)
        tb.add_inventory(cn_left, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_left, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_left, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_GPU_API_DXVA', 'HW_NIC_DCB_PFC', 'CUSTOM_FOO')

        cn_middle = tb.create_provider(
            self.context, 'cn_middle', uuids.aggA, uuids.aggB)
        os.environ['CN_MIDDLE'] = cn_middle.uuid
        tb.add_inventory(cn_middle, 'VCPU', 8)
        tb.add_inventory(cn_middle, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_middle, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_middle, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_middle, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_CPU_X86_SSE', 'HW_NIC_ACCEL_TLS')

        cn_right = tb.create_provider(
            self.context, 'cn_right', uuids.aggB, uuids.aggC)
        os.environ['CN_RIGHT'] = cn_right.uuid
        tb.add_inventory(cn_right, 'VCPU', 8)
        tb.add_inventory(cn_right, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_right, 'DISK_GB', 500)
        tb.add_inventory(cn_right, 'VGPU', 8, max_unit=2)
        tb.set_traits(cn_right, 'HW_CPU_X86_MMX', 'HW_GPU_API_DXVA',
                      'CUSTOM_DISK_SSD')

        shr_disk_1 = tb.create_provider(self.context, 'shr_disk_1', uuids.aggA)
        os.environ['SHR_DISK_1'] = shr_disk_1.uuid
        tb.add_inventory(shr_disk_1, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_1, 'MISC_SHARES_VIA_AGGREGATE',
                      'CUSTOM_DISK_SSD')

        shr_disk_2 = tb.create_provider(
            self.context, 'shr_disk_2', uuids.aggA, uuids.aggB)
        os.environ['SHR_DISK_2'] = shr_disk_2.uuid
        tb.add_inventory(shr_disk_2, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_2, 'MISC_SHARES_VIA_AGGREGATE')

        shr_net = tb.create_provider(self.context, 'shr_net', uuids.aggC)
        os.environ['SHR_NET'] = shr_net.uuid
        tb.add_inventory(shr_net, 'SRIOV_NET_VF', 16)
        tb.add_inventory(shr_net, 'CUSTOM_NET_MBPS', 40000)
        tb.set_traits(shr_net, 'MISC_SHARES_VIA_AGGREGATE')