def test_get_all_by_required(self):
        """Filter resource providers by required (and forbidden) traits."""
        # Providers rp_0..rp_3 get an increasing prefix of these traits:
        # rp_0 has none, rp_1 has A, rp_2 has A+B, rp_3 has A+B+C.
        all_traits = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B',
                      'CUSTOM_TRAIT_C']
        for idx in range(4):
            provider = self._create_provider('rp_' + str(idx))
            if idx:
                tb.set_traits(provider, *all_traits[:idx])

        # Requiring CUSTOM_TRAIT_A matches rp_1, rp_2 and rp_3.
        self._run_get_all_by_filters(
            ['rp_1', 'rp_2', 'rp_3'],
            filters={'required': ['CUSTOM_TRAIT_A']})

        # Additionally forbidding CUSTOM_TRAIT_B leaves only rp_1, whose
        # sole trait is CUSTOM_TRAIT_A.
        matched = self._run_get_all_by_filters(
            ['rp_1'],
            filters={'required': ['CUSTOM_TRAIT_A', '!CUSTOM_TRAIT_B']})

        self.assertEqual(uuidsentinel.rp_1, matched[0].uuid)
        rp1_traits = trait_obj.get_all_by_resource_provider(
            self.ctx, matched[0])
        self.assertEqual(1, len(rp1_traits))
        self.assertEqual('CUSTOM_TRAIT_A', rp1_traits[0].name)
Example #2
0
    def start_fixture(self):
        """Build a provider tree sharing DISK_GB via an aggregate.

        Creates two compute nodes (each with two NUMA children, each NUMA
        child with one PF child) and two sharing storage providers, all
        members of a single aggregate, then exports every uuid through
        environment variables for the gabbi tests.
        """
        super(SharedStorageFixture, self).start_fixture()

        agg_uuid = uuidutils.generate_uuid()
        os.environ['AGG_UUID'] = agg_uuid

        cn1 = tb.create_provider(self.context, 'cn1', agg_uuid)
        cn2 = tb.create_provider(self.context, 'cn2', agg_uuid)
        ss = tb.create_provider(self.context, 'ss', agg_uuid)
        ss2 = tb.create_provider(self.context, 'ss2', agg_uuid)

        numa1_1 = tb.create_provider(self.context, 'numa1_1', parent=cn1.uuid)
        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)

        pf1_1 = tb.create_provider(self.context, 'pf1_1', parent=numa1_1.uuid)
        pf1_2 = tb.create_provider(self.context, 'pf1_2', parent=numa1_2.uuid)
        pf2_1 = tb.create_provider(self.context, 'pf2_1', parent=numa2_1.uuid)
        pf2_2 = tb.create_provider(self.context, 'pf2_2', parent=numa2_2.uuid)

        # Export every provider uuid for use in the gabbi YAML.
        for var, provider in (
                ('CN1_UUID', cn1), ('CN2_UUID', cn2),
                ('SS_UUID', ss), ('SS2_UUID', ss2),
                ('NUMA1_1_UUID', numa1_1), ('NUMA1_2_UUID', numa1_2),
                ('NUMA2_1_UUID', numa2_1), ('NUMA2_2_UUID', numa2_2),
                ('PF1_1_UUID', pf1_1), ('PF1_2_UUID', pf1_2),
                ('PF2_1_UUID', pf2_1), ('PF2_2_UUID', pf2_2)):
            os.environ[var] = provider.uuid

        # Each compute node exposes VCPU and MEMORY_MB inventory.
        for cn in (cn1, cn2):
            tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0)
            tb.add_inventory(cn, orc.MEMORY_MB, 128 * 1024,
                             allocation_ratio=1.5)

        tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')
        # Only cn2 also has local disk.
        tb.add_inventory(cn2, orc.DISK_GB, 2000,
                         reserved=100, allocation_ratio=1.0)

        # The storage providers expose DISK_GB inventory and are marked
        # as sharing it with any provider associated via the aggregate.
        for shared in (ss, ss2):
            tb.add_inventory(shared, orc.DISK_GB, 2000,
                             reserved=100, allocation_ratio=1.0)
            tb.set_traits(shared, 'MISC_SHARES_VIA_AGGREGATE')

        # Each PF exposes SRIOV_NET_VF inventory.
        for pf in (pf1_1, pf1_2, pf2_1, pf2_2):
            tb.add_inventory(pf, orc.SRIOV_NET_VF, 8, allocation_ratio=1.0)
Example #3
0
    def start_fixture(self):
        """Create two compute trees with NUMA children and two sharing
        storage providers spread across three aggregates, exporting every
        uuid through environment variables for the gabbi tests.
        """
        super(NUMAAggregateFixture, self).start_fixture()

        aggA_uuid = uuidutils.generate_uuid()
        aggB_uuid = uuidutils.generate_uuid()
        aggC_uuid = uuidutils.generate_uuid()
        os.environ['AGGA_UUID'] = aggA_uuid
        os.environ['AGGB_UUID'] = aggB_uuid
        os.environ['AGGC_UUID'] = aggC_uuid

        cn1 = tb.create_provider(self.context, 'cn1', aggA_uuid)
        cn2 = tb.create_provider(self.context, 'cn2', aggA_uuid, aggB_uuid)
        ss1 = tb.create_provider(self.context, 'ss1', aggA_uuid)
        ss2 = tb.create_provider(self.context, 'ss2', aggC_uuid)

        # numa1_1 is the only NUMA child directly in an aggregate (aggC).
        numa1_1 = tb.create_provider(
            self.context, 'numa1_1', aggC_uuid, parent=cn1.uuid)
        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)

        # Export every provider uuid for use in the gabbi YAML.
        for var, provider in (
                ('CN1_UUID', cn1), ('CN2_UUID', cn2),
                ('SS1_UUID', ss1), ('SS2_UUID', ss2),
                ('NUMA1_1_UUID', numa1_1), ('NUMA1_2_UUID', numa1_2),
                ('NUMA2_1_UUID', numa2_1), ('NUMA2_2_UUID', numa2_2)):
            os.environ[var] = provider.uuid

        # VCPU inventory lives on the NUMA children, not on the roots.
        for numa in (numa1_1, numa1_2, numa2_1, numa2_2):
            tb.add_inventory(numa, orc.VCPU, 24, allocation_ratio=16.0)

        # Shared storage providers expose DISK_GB inventory and share it
        # with any provider associated via aggregate.
        for ss in (ss1, ss2):
            tb.add_inventory(ss, orc.DISK_GB, 2000, reserved=100,
                             allocation_ratio=1.0)
            tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')
            tb.set_sharing_among_agg(ss)
    def test_set_traits_for_resource_provider(self):
        """Setting traits replaces the provider's trait set and bumps the
        provider generation each time.
        """
        rp = self._create_provider('fake_resource_provider')
        start_gen = rp.generation
        self.assertIsInstance(rp.id, int)

        names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C']
        tb.set_traits(rp, *names)

        self._assert_traits(
            names, trait_obj.get_all_by_resource_provider(self.ctx, rp))
        self.assertEqual(rp.generation, start_gen + 1)
        start_gen = rp.generation

        # Re-set with one trait removed; both the association and the
        # generation are updated again.
        names.remove('CUSTOM_TRAIT_A')
        self._assert_traits(
            names, trait_obj.get_all(self.ctx, filters={'name_in': names}))
        tb.set_traits(rp, *names)
        self._assert_traits(
            names, trait_obj.get_all_by_resource_provider(self.ctx, rp))
        self.assertEqual(rp.generation, start_gen + 1)
    def test_set_traits_for_correct_resource_provider(self):
        """This test creates two ResourceProviders, and attaches same trait to
        both of them. Then detaching the trait from one of them, and ensure
        the trait still associated with another one.
        """
        rp1 = self._create_provider('fake_resource_provider1')
        rp2 = self._create_provider('fake_resource_provider2')
        tname = 'CUSTOM_TRAIT_A'

        # Associate the trait with both providers and confirm it stuck.
        for rp in (rp1, rp2):
            tb.set_traits(rp, tname)
            self._assert_traits(
                [tname],
                trait_obj.get_all_by_resource_provider(self.ctx, rp))

        # Clearing rp1's traits must not disturb rp2's association.
        tb.set_traits(rp1)
        self._assert_traits(
            [], trait_obj.get_all_by_resource_provider(self.ctx, rp1))
        self._assert_traits(
            [tname], trait_obj.get_all_by_resource_provider(self.ctx, rp2))
    def test_destroy_with_traits(self):
        """Destroying a provider deletes its rows in the
        'resource_provider_traits' table along with the provider itself.
        """
        rp = self._create_provider('fake_rp1', uuid=uuidsentinel.fake_rp1)
        tb.set_traits(rp, 'CUSTOM_TRAIT_1')
        self.assertEqual(
            1, len(trait_obj.get_all_by_resource_provider(self.ctx, rp)))

        # Deleting the provider also drops the trait association.
        rp.destroy()
        self.assertEqual(
            0, len(trait_obj.get_all_by_resource_provider(self.ctx, rp)))

        # The provider itself is gone: lookups by uuid raise NotFound.
        self.assertRaises(exception.NotFound,
                          rp_obj.ResourceProvider.get_by_uuid,
                          self.ctx, uuidsentinel.fake_rp1)
Example #7
0
    def start_fixture(self):
        """Create three compute nodes and three sharing providers spread
        across aggregates aggA/aggB/aggC, exporting every provider uuid
        through environment variables for the gabbi tests.
        """
        super(GranularFixture, self).start_fixture()

        # Custom resource class used below for network bandwidth.
        rc_obj.ResourceClass(
            context=self.context, name='CUSTOM_NET_MBPS').create()

        os.environ['AGGA'] = uuids.aggA
        os.environ['AGGB'] = uuids.aggB
        os.environ['AGGC'] = uuids.aggC

        # cn_left: member of aggA; exposes every resource class used in
        # these tests, plus CPU/GPU/NIC traits.
        cn_left = tb.create_provider(self.context, 'cn_left', uuids.aggA)
        os.environ['CN_LEFT'] = cn_left.uuid
        tb.add_inventory(cn_left, 'VCPU', 8)
        tb.add_inventory(cn_left, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_left, 'DISK_GB', 500)
        tb.add_inventory(cn_left, 'VGPU', 8)
        tb.add_inventory(cn_left, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_left, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_left, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_GPU_API_DXVA', 'HW_NIC_DCB_PFC', 'CUSTOM_FOO')

        # cn_middle: member of aggA and aggB; no local DISK_GB or VGPU.
        cn_middle = tb.create_provider(
            self.context, 'cn_middle', uuids.aggA, uuids.aggB)
        os.environ['CN_MIDDLE'] = cn_middle.uuid
        tb.add_inventory(cn_middle, 'VCPU', 8)
        tb.add_inventory(cn_middle, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_middle, 'SRIOV_NET_VF', 8)
        tb.add_inventory(cn_middle, 'CUSTOM_NET_MBPS', 4000)
        tb.set_traits(cn_middle, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
                      'HW_CPU_X86_SSE', 'HW_NIC_ACCEL_TLS')

        # cn_right: member of aggB and aggC; VGPU limited to max_unit=2.
        cn_right = tb.create_provider(
            self.context, 'cn_right', uuids.aggB, uuids.aggC)
        os.environ['CN_RIGHT'] = cn_right.uuid
        tb.add_inventory(cn_right, 'VCPU', 8)
        tb.add_inventory(cn_right, 'MEMORY_MB', 4096)
        tb.add_inventory(cn_right, 'DISK_GB', 500)
        tb.add_inventory(cn_right, 'VGPU', 8, max_unit=2)
        tb.set_traits(cn_right, 'HW_CPU_X86_MMX', 'HW_GPU_API_DXVA',
                      'CUSTOM_DISK_SSD')

        # shr_disk_1: shares SSD-backed DISK_GB with aggA members.
        shr_disk_1 = tb.create_provider(self.context, 'shr_disk_1', uuids.aggA)
        os.environ['SHR_DISK_1'] = shr_disk_1.uuid
        tb.add_inventory(shr_disk_1, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_1, 'MISC_SHARES_VIA_AGGREGATE',
                      'CUSTOM_DISK_SSD')

        # shr_disk_2: shares DISK_GB with aggA and aggB members (no SSD).
        shr_disk_2 = tb.create_provider(
            self.context, 'shr_disk_2', uuids.aggA, uuids.aggB)
        os.environ['SHR_DISK_2'] = shr_disk_2.uuid
        tb.add_inventory(shr_disk_2, 'DISK_GB', 1000)
        tb.set_traits(shr_disk_2, 'MISC_SHARES_VIA_AGGREGATE')

        # shr_net: shares network resources with aggC members.
        shr_net = tb.create_provider(self.context, 'shr_net', uuids.aggC)
        os.environ['SHR_NET'] = shr_net.uuid
        tb.add_inventory(shr_net, 'SRIOV_NET_VF', 16)
        tb.add_inventory(shr_net, 'CUSTOM_NET_MBPS', 40000)
        tb.set_traits(shr_net, 'MISC_SHARES_VIA_AGGREGATE')
Example #8
0
    def make_entities(self):
        """Populate a NUMA-modelled tree under self.cn1 and a NIC tree
        under self.cn2, with ss1 sharing DISK_GB via aggregate aggA, and
        export every uuid through environment variables.

        self.cn1/self.cn2 are reused when already set, so the fixture can
        be invoked more than once without recreating the roots.
        """
        aggA_uuid = uuidutils.generate_uuid()
        os.environ['AGGA_UUID'] = aggA_uuid

        # Shared storage provider in aggA.
        ss1 = tb.create_provider(self.context, 'ss1', aggA_uuid)
        tb.set_traits(ss1, ot.MISC_SHARES_VIA_AGGREGATE)
        tb.add_inventory(ss1, orc.DISK_GB, 2000)
        os.environ['SS1_UUID'] = ss1.uuid

        # CN1
        if not self.cn1:
            self.cn1 = tb.create_provider(self.context, 'cn1', aggA_uuid)
        self.cn1.set_aggregates([aggA_uuid])
        tb.set_traits(self.cn1, ot.COMPUTE_VOLUME_MULTI_ATTACH)
        os.environ['CN1_UUID'] = self.cn1.uuid

        # Two NUMA children of cn1; numa1 additionally gets CUSTOM_FOO.
        numas = []
        for i in (0, 1):
            numa = tb.create_provider(
                self.context, 'numa%d' % i, parent=self.cn1.uuid)
            traits = [ot.HW_NUMA_ROOT]
            if i == 1:
                traits.append('CUSTOM_FOO')
            tb.set_traits(numa, *traits)
            tb.add_inventory(numa, orc.VCPU, 4)
            numas.append(numa)
            os.environ['NUMA%d_UUID' % i] = numa.uuid
        # The two NUMA nodes get different MEMORY_MB constraints.
        tb.add_inventory(
            numas[0], orc.MEMORY_MB, 2048, min_unit=512, step_size=256)
        tb.add_inventory(
            numas[1], orc.MEMORY_MB, 2048, min_unit=256, max_unit=1024)
        # Pre-existing allocation against numa0's VCPU.
        user, proj = tb.create_user_and_project(self.context, prefix='numafx')
        consumer = tb.ensure_consumer(self.context, user, proj)
        tb.set_allocation(self.context, numas[0], consumer, {orc.VCPU: 2})

        # Accelerator and GPU children of numa0.
        fpga = tb.create_provider(self.context, 'fpga0', parent=numas[0].uuid)
        # TODO(efried): Use standard FPGA resource class
        tb.add_inventory(fpga, 'CUSTOM_FPGA', 1)
        os.environ['FPGA0_UUID'] = fpga.uuid

        pgpu = tb.create_provider(self.context, 'pgpu0', parent=numas[0].uuid)
        tb.add_inventory(pgpu, orc.VGPU, 8)
        os.environ['PGPU0_UUID'] = pgpu.uuid

        # Two FPGA children of numa1.
        for i in (0, 1):
            fpga = tb.create_provider(
                self.context, 'fpga1_%d' % i, parent=numas[1].uuid)
            # TODO(efried): Use standard FPGA resource class
            tb.add_inventory(fpga, 'CUSTOM_FPGA', 1)
            os.environ['FPGA1_%d_UUID' % i] = fpga.uuid

        # SRIOV agent subtree under cn1 with two physnet devices.
        agent = tb.create_provider(
            self.context, 'sriov_agent', parent=self.cn1.uuid)
        tb.set_traits(agent, 'CUSTOM_VNIC_TYPE_DIRECT')
        os.environ['SRIOV_AGENT_UUID'] = agent.uuid

        for i in (1, 2):
            dev = tb.create_provider(
                self.context, 'esn%d' % i, parent=agent.uuid)
            tb.set_traits(dev, 'CUSTOM_PHYSNET%d' % i)
            tb.add_inventory(dev, orc.NET_BW_EGR_KILOBIT_PER_SEC, 10000 * i)
            os.environ['ESN%d_UUID' % i] = dev.uuid

        # OVS agent subtree under cn1 with one bridge device.
        agent = tb.create_provider(
            self.context, 'ovs_agent', parent=self.cn1.uuid)
        tb.set_traits(agent, 'CUSTOM_VNIC_TYPE_NORMAL')
        os.environ['OVS_AGENT_UUID'] = agent.uuid

        dev = tb.create_provider(self.context, 'br_int', parent=agent.uuid)
        tb.set_traits(dev, 'CUSTOM_PHYSNET0')
        tb.add_inventory(dev, orc.NET_BW_EGR_KILOBIT_PER_SEC, 1000)
        os.environ['BR_INT_UUID'] = dev.uuid

        # CN2
        if not self.cn2:
            self.cn2 = tb.create_provider(self.context, 'cn2')

        self.cn2.set_aggregates([aggA_uuid])
        tb.add_inventory(self.cn2, orc.VCPU, 8)
        # Get a new consumer
        consumer = tb.ensure_consumer(self.context, user, proj)
        tb.set_allocation(self.context, self.cn2, consumer, {orc.VCPU: 3})
        tb.add_inventory(
            self.cn2, orc.MEMORY_MB, 2048, min_unit=1024, step_size=128)
        tb.add_inventory(self.cn2, orc.DISK_GB, 1000)
        tb.set_traits(self.cn2, 'CUSTOM_FOO')
        os.environ['CN2_UUID'] = self.cn2.uuid

        # Three NIC children of cn2, each with PF children below.
        nics = []
        for i in (1, 2, 3):
            nic = tb.create_provider(
                self.context, 'nic%d' % i, parent=self.cn2.uuid)
            # TODO(efried): Use standard HW_NIC_ROOT trait
            tb.set_traits(nic, 'CUSTOM_HW_NIC_ROOT')
            nics.append(nic)
            os.environ['NIC%s_UUID' % i] = nic.uuid
        # PFs for NIC1
        for i in (1, 2):
            suf = '1_%d' % i
            pf = tb.create_provider(
                self.context, 'pf%s' % suf, parent=nics[0].uuid)
            tb.set_traits(pf, 'CUSTOM_PHYSNET%d' % i)
            # TODO(efried): Use standard generic VF resource class?
            tb.add_inventory(pf, 'CUSTOM_VF', 4)
            os.environ['PF%s_UUID' % suf] = pf.uuid
        # PFs for NIC2 (physnets alternate between 1 and 2).
        for i in (0, 1, 2, 3):
            suf = '2_%d' % (i + 1)
            pf = tb.create_provider(
                self.context, 'pf%s' % suf, parent=nics[1].uuid)
            tb.set_traits(pf, 'CUSTOM_PHYSNET%d' % ((i % 2) + 1))
            # TODO(efried): Use standard generic VF resource class?
            tb.add_inventory(pf, 'CUSTOM_VF', 2)
            os.environ['PF%s_UUID' % suf] = pf.uuid
        # PF for NIC3
        suf = '3_1'
        pf = tb.create_provider(
            self.context, 'pf%s' % suf, parent=nics[2].uuid)
        tb.set_traits(pf, 'CUSTOM_PHYSNET1')
        # TODO(efried): Use standard generic VF resource class?
        tb.add_inventory(pf, 'CUSTOM_VF', 8)
        os.environ['PF%s_UUID' % suf] = pf.uuid
Example #9
0
 def test_trait_delete_in_use(self):
     """A trait that is associated with a provider cannot be destroyed."""
     provider = self._create_provider('fake_resource_provider')
     trait, = tb.set_traits(provider, 'CUSTOM_TRAIT_A')
     self.assertRaises(exception.TraitInUse, trait.destroy)
Example #10
0
    def test_shared_provider_capacity(self):
        """Sets up a resource provider that shares DISK_GB inventory via an
        aggregate, a couple resource providers representing "local disk"
        compute nodes and ensures the _get_providers_sharing_capacity()
        function finds that provider and not providers of "local disk".
        """
        # Two compute nodes modelling local-disk hosts.  Only cn1 has
        # DISK_GB, and neither is a sharing provider, so both must be
        # excluded from the shared-capacity results.
        cn1 = self._create_provider('cn1')
        cn2 = self._create_provider('cn2')
        for cn in (cn1, cn2):
            tb.add_inventory(cn, orc.VCPU, 24, allocation_ratio=16.0)
            tb.add_inventory(cn, orc.MEMORY_MB, 32768, min_unit=64,
                             max_unit=32768, step_size=64,
                             allocation_ratio=1.5)
        tb.add_inventory(cn1, orc.DISK_GB, 2000, min_unit=100,
                         max_unit=2000, step_size=10)

        # Two shared storage pools with DISK_GB inventory, marked as
        # sharing it with any provider associated via aggregate.
        ss1 = self._create_provider('shared storage 1')
        ss2 = self._create_provider('shared storage 2')
        for ss, disk_amount in ((ss1, 2000), (ss2, 1000)):
            tb.add_inventory(ss, orc.DISK_GB, disk_amount, min_unit=100,
                             max_unit=2000, step_size=10)
            tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")

        # Both pools are reported as sharing providers.
        self.assertEqual(
            set([ss1.id, ss2.id]), res_ctx.get_sharing_providers(self.ctx))

        request = placement_lib.RequestGroup(
            use_same_provider=False,
            resources={orc.VCPU: 2,
                       orc.MEMORY_MB: 256,
                       orc.DISK_GB: 1500})
        rg_ctx = res_ctx.RequestGroupSearchContext(
            self.ctx,
            request,
            res_ctx._has_provider_trees(self.ctx),
            res_ctx.get_sharing_providers(self.ctx))

        vcpu_id = orc.STANDARDS.index(orc.VCPU)
        disk_gb_id = orc.STANDARDS.index(orc.DISK_GB)

        # No sharing provider offers VCPU at all...
        self.assertEqual(
            set(), rg_ctx.get_rps_with_shared_capacity(vcpu_id))
        # ...and only ss1 satisfies the 1500 DISK_GB request (ss2 has
        # only 1000 total).
        self.assertEqual(
            set([ss1.id]),
            rg_ctx.get_rps_with_shared_capacity(disk_gb_id))
Example #11
0
    def start_fixture(self):
        """Model four compute nodes for Neutron QoS multi-segment tests.

        Each compute node gets an OVS agent subtree (bridge providers
        with bandwidth inventory) and an SRIOV agent subtree (PF
        providers).  CUSTOM_PHYSNET_* traits model segment connectivity:

        * compute0: no connectivity to the multi-segment network
        * compute1: connected to segment 1 only
        * compute2: connected to segment 2 only
        * compute3: connected to both segments 1 and 2

        Every provider uuid is exported through environment variables for
        use by the gabbi tests.
        """
        super(NeutronQoSMultiSegmentFixture, self).start_fixture()

        # compute 0 with no connectivity to the multi segment network
        compute0 = tb.create_provider(self.context, 'compute0')
        os.environ['compute0'] = compute0.uuid
        tb.add_inventory(compute0, 'VCPU', 8)
        tb.add_inventory(compute0, 'MEMORY_MB', 4096)
        tb.add_inventory(compute0, 'DISK_GB', 500)

        # OVS agent subtree
        compute0_ovs_agent = tb.create_provider(self.context,
                                                'compute0:Open vSwitch agent',
                                                parent=compute0.uuid)
        os.environ['compute0:ovs_agent'] = compute0_ovs_agent.uuid
        tb.add_inventory(compute0_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute0_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute0_br_ex = tb.create_provider(
            self.context,
            'compute0:Open vSwitch agent:br-ex',
            parent=compute0_ovs_agent.uuid)
        os.environ['compute0:br_ex'] = compute0_br_ex.uuid
        tb.add_inventory(compute0_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 5000)
        tb.add_inventory(compute0_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 5000)
        tb.set_traits(
            compute0_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_OTHER',
        )

        # SRIOV agent subtree
        compute0_sriov_agent = tb.create_provider(self.context,
                                                  'compute0:NIC Switch agent',
                                                  parent=compute0.uuid)
        os.environ['compute0:sriov_agent'] = compute0_sriov_agent.uuid
        tb.set_traits(
            compute0_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute0_pf0 = tb.create_provider(
            self.context,
            'compute0:NIC Switch agent:enp129s0f0',
            parent=compute0_sriov_agent.uuid)
        os.environ['compute0:pf0'] = compute0_pf0.uuid
        tb.add_inventory(compute0_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 10000)
        tb.add_inventory(compute0_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 10000)
        tb.set_traits(
            compute0_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_OTHER',
        )

        # compute 1 with network connectivity to segment 1
        compute1 = tb.create_provider(self.context, 'compute1')
        os.environ['compute1'] = compute1.uuid
        tb.add_inventory(compute1, 'VCPU', 8)
        tb.add_inventory(compute1, 'MEMORY_MB', 4096)
        tb.add_inventory(compute1, 'DISK_GB', 500)
        # OVS agent subtree
        compute1_ovs_agent = tb.create_provider(self.context,
                                                'compute1:Open vSwitch agent',
                                                parent=compute1.uuid)
        os.environ['compute1:ovs_agent'] = compute1_ovs_agent.uuid
        tb.add_inventory(compute1_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute1_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute1_br_ex = tb.create_provider(
            self.context,
            'compute1:Open vSwitch agent:br-ex',
            parent=compute1_ovs_agent.uuid)
        os.environ['compute1:br_ex'] = compute1_br_ex.uuid
        tb.add_inventory(compute1_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 5000)
        tb.add_inventory(compute1_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 5000)
        tb.set_traits(
            compute1_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        # SRIOV agent subtree
        compute1_sriov_agent = tb.create_provider(self.context,
                                                  'compute1:NIC Switch agent',
                                                  parent=compute1.uuid)
        os.environ['compute1:sriov_agent'] = compute1_sriov_agent.uuid
        tb.set_traits(
            compute1_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute1_pf0 = tb.create_provider(
            self.context,
            'compute1:NIC Switch agent:enp129s0f0',
            parent=compute1_sriov_agent.uuid)
        os.environ['compute1:pf0'] = compute1_pf0.uuid
        tb.add_inventory(compute1_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 10000)
        tb.add_inventory(compute1_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 10000)
        tb.set_traits(
            compute1_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        # compute 2 with network connectivity to segment 2
        compute2 = tb.create_provider(self.context, 'compute2')
        os.environ['compute2'] = compute2.uuid
        tb.add_inventory(compute2, 'VCPU', 8)
        tb.add_inventory(compute2, 'MEMORY_MB', 4096)
        tb.add_inventory(compute2, 'DISK_GB', 500)

        # OVS agent subtree
        compute2_ovs_agent = tb.create_provider(self.context,
                                                'compute2:Open vSwitch agent',
                                                parent=compute2.uuid)
        os.environ['compute2:ovs_agent'] = compute2_ovs_agent.uuid
        tb.add_inventory(compute2_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute2_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute2_br_ex = tb.create_provider(
            self.context,
            'compute2:Open vSwitch agent:br-ex',
            parent=compute2_ovs_agent.uuid)
        os.environ['compute2:br_ex'] = compute2_br_ex.uuid
        tb.add_inventory(compute2_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 5000)
        tb.add_inventory(compute2_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 5000)
        tb.set_traits(
            compute2_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S2',
        )

        # SRIOV agent subtree
        compute2_sriov_agent = tb.create_provider(self.context,
                                                  'compute2:NIC Switch agent',
                                                  parent=compute2.uuid)
        os.environ['compute2:sriov_agent'] = compute2_sriov_agent.uuid
        tb.set_traits(
            compute2_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute2_pf0 = tb.create_provider(
            self.context,
            'compute2:NIC Switch agent:enp129s0f0',
            parent=compute2_sriov_agent.uuid)
        os.environ['compute2:pf0'] = compute2_pf0.uuid
        tb.add_inventory(compute2_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 10000)
        tb.add_inventory(compute2_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 10000)
        tb.set_traits(
            compute2_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S2',
        )

        # compute 3 with network connectivity to both segment 1 and 2
        compute3 = tb.create_provider(self.context, 'compute3')
        os.environ['compute3'] = compute3.uuid
        tb.add_inventory(compute3, 'VCPU', 8)
        tb.add_inventory(compute3, 'MEMORY_MB', 4096)
        tb.add_inventory(compute3, 'DISK_GB', 500)

        # OVS agent subtree
        compute3_ovs_agent = tb.create_provider(self.context,
                                                'compute3:Open vSwitch agent',
                                                parent=compute3.uuid)
        os.environ['compute3:ovs_agent'] = compute3_ovs_agent.uuid
        tb.add_inventory(compute3_ovs_agent,
                         'NET_PACKET_RATE_KILOPACKET_PER_SEC', 1000)
        tb.set_traits(
            compute3_ovs_agent,
            'CUSTOM_VNIC_TYPE_NORMAL',
        )

        compute3_br_ex = tb.create_provider(
            self.context,
            'compute3:Open vSwitch agent:br-ex',
            parent=compute3_ovs_agent.uuid)
        os.environ['compute3:br_ex'] = compute3_br_ex.uuid
        tb.add_inventory(compute3_br_ex, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_br_ex, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_br_ex,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        compute3_br_ex2 = tb.create_provider(
            self.context,
            'compute3:Open vSwitch agent:br-ex2',
            parent=compute3_ovs_agent.uuid)
        os.environ['compute3:br_ex2'] = compute3_br_ex2.uuid
        tb.add_inventory(compute3_br_ex2, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_br_ex2, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_br_ex2,
            'CUSTOM_VNIC_TYPE_NORMAL',
            'CUSTOM_PHYSNET_MSN_S2',
        )

        # SRIOV agent subtree
        compute3_sriov_agent = tb.create_provider(self.context,
                                                  'compute3:NIC Switch agent',
                                                  parent=compute3.uuid)
        # Bug fix: this previously exported compute2_sriov_agent.uuid
        # (copy-paste error), making the env var point at compute2's
        # agent instead of compute3's.
        os.environ['compute3:sriov_agent'] = compute3_sriov_agent.uuid
        tb.set_traits(
            compute3_sriov_agent,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
        )

        compute3_pf0 = tb.create_provider(
            self.context,
            'compute3:NIC Switch agent:enp129s0f0',
            parent=compute3_sriov_agent.uuid)
        os.environ['compute3:pf0'] = compute3_pf0.uuid
        tb.add_inventory(compute3_pf0, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_pf0, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_pf0,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S1',
        )

        compute3_pf1 = tb.create_provider(
            self.context,
            'compute3:NIC Switch agent:enp129s0f1',
            parent=compute3_sriov_agent.uuid)
        os.environ['compute3:pf1'] = compute3_pf1.uuid
        tb.add_inventory(compute3_pf1, 'NET_BW_EGR_KILOBIT_PER_SEC', 1000)
        tb.add_inventory(compute3_pf1, 'NET_BW_IGR_KILOBIT_PER_SEC', 1000)
        tb.set_traits(
            compute3_pf1,
            'CUSTOM_VNIC_TYPE_DIRECT',
            'CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL',
            'CUSTOM_VNIC_TYPE_MACVTAP',
            'CUSTOM_PHYSNET_MSN_S2',
        )