Exemplo n.º 1
0
 def test_populate_from_iterable_disown_grandchild(self):
     """Repopulating with only [child] must drop the child's descendants.

     Build root -> child -> grandchild, then populate again with just the
     child record: the grandchild should vanish from the tree.
     """
     root_rec = dict(uuid=uuids.root, name='root', generation=0)
     child_rec = dict(uuid=uuids.child, name='child', generation=1,
                      parent_provider_uuid=uuids.root)
     grandchild_rec = dict(uuid=uuids.grandchild, name='grandchild',
                           generation=2,
                           parent_provider_uuid=uuids.child)
     tree = provider_tree.ProviderTree()
     tree.populate_from_iterable([root_rec, child_rec, grandchild_rec])
     self.assertEqual([uuids.root, uuids.child, uuids.grandchild],
                      tree.get_provider_uuids())
     self.assertTrue(tree.exists(uuids.grandchild))
     # Re-sending only the child implicitly disowns the grandchild.
     tree.populate_from_iterable([child_rec])
     self.assertEqual([uuids.root, uuids.child], tree.get_provider_uuids())
     self.assertFalse(tree.exists(uuids.grandchild))
Exemplo n.º 2
0
    def test_populate_from_iterable_error_orphan_cycle(self):
        """Orphans and parent cycles both make population raise ValueError."""
        tree = provider_tree.ProviderTree()

        # A provider whose declared parent is unknown is an orphan.
        orphan = dict(uuid=uuids.grandchild1_1,
                      name='grandchild1_1',
                      generation=11,
                      parent_provider_uuid=uuids.child1)
        self.assertRaises(ValueError, tree.populate_from_iterable, [orphan])

        # Close the loop: now neither record is an orphan, but neither can
        # reach a root either, so population must still fail.
        looper = dict(uuid=uuids.child1,
                      name='child1',
                      generation=1,
                      parent_provider_uuid=uuids.grandchild1_1)
        self.assertRaises(ValueError, tree.populate_from_iterable,
                          [orphan, looper])
Exemplo n.º 3
0
 def test_has_inventory_changed_no_existing_rp(self):
     """has_inventory_changed raises ValueError for an unknown provider."""
     tree = provider_tree.ProviderTree(self.compute_nodes)
     self.assertRaises(ValueError, tree.has_inventory_changed,
                       uuids.non_existing_rp, {})
Exemplo n.º 4
0
 def test_update_inventory_no_existing_rp(self):
     """update_inventory raises ValueError for an unknown provider."""
     tree = provider_tree.ProviderTree(self.compute_nodes)
     self.assertRaises(ValueError, tree.update_inventory,
                       uuids.non_existing_rp, {}, 1)
Exemplo n.º 5
0
    def test_has_inventory_changed(self):
        """Walk has_inventory_changed()/update_inventory() through a
        lifecycle: initial set, no-op repeat, a changed value, a deleted
        field, and a deleted resource class.
        """
        cn = self.compute_node1
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)
        rp_gen = 1

        cn_inv = {
            'VCPU': {
                'total': 8,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 8,
                'step_size': 1,
                'allocation_ratio': 16.0,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': 512,
                'min_unit': 64,
                'max_unit': 1024,
                'step_size': 64,
                'allocation_ratio': 1.5,
            },
            'DISK_GB': {
                'total': 1000,
                'reserved': 100,
                'min_unit': 10,
                'max_unit': 1000,
                'step_size': 10,
                'allocation_ratio': 1.0,
            },
        }
        # A provider with no inventory yet always reports "changed".
        self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))

        # Updating with the same inventory info should return False
        self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))

        # Changing any field's value registers as a change.
        cn_inv['VCPU']['total'] = 6
        self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))

        self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))

        # Deleting a key in the new record should NOT result in changes being
        # recorded...
        # (presumably the comparison only considers keys present in the new
        # record -- TODO confirm against the ProviderTree implementation)
        del cn_inv['VCPU']['allocation_ratio']
        self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
        self.assertFalse(pt.update_inventory(cn.uuid, cn_inv, rp_gen))

        # ...but removing a whole resource class IS a change.
        del cn_inv['MEMORY_MB']
        self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
        self.assertTrue(pt.update_inventory(cn.uuid, cn_inv, rp_gen))
Exemplo n.º 6
0
    def test_update_provider_tree(self, mock_get_stats):
        """Driver inventory reflects host stats plus config overrides.

        An explicitly-set cpu_allocation_ratio must override
        initial_cpu_allocation_ratio, and reserved_host_memory_mb must show
        up as the MEMORY_MB reservation.
        """
        # Add a wrinkle such that cpu_allocation_ratio is configured to a
        # non-default value and overrides initial_cpu_allocation_ratio.
        self.flags(cpu_allocation_ratio=1.0)
        # Add a wrinkle such that reserved_host_memory_mb is set to a
        # non-default value.
        self.flags(reserved_host_memory_mb=2048)
        expected_reserved_disk = (xenapi_driver.XenAPIDriver.
                                  _get_reserved_host_disk_gb_from_config())
        expected_inv = {
            orc.VCPU: {
                'total': 50,
                'min_unit': 1,
                'max_unit': 50,
                'step_size': 1,
                'allocation_ratio': CONF.cpu_allocation_ratio,
                'reserved': CONF.reserved_host_cpus,
            },
            orc.MEMORY_MB: {
                'total': 3,
                'min_unit': 1,
                'max_unit': 3,
                'step_size': 1,
                'allocation_ratio': CONF.initial_ram_allocation_ratio,
                'reserved': CONF.reserved_host_memory_mb,
            },
            orc.DISK_GB: {
                'total': 5,
                'min_unit': 1,
                'max_unit': 5,
                'step_size': 1,
                'allocation_ratio': CONF.initial_disk_allocation_ratio,
                'reserved': expected_reserved_disk,
            },
            # NOTE(review): VGPU carries no allocation_ratio/reserved here,
            # and max_unit is 1 despite total 7 -- presumably one vGPU per
            # instance; confirm against the driver.
            orc.VGPU: {
                'total': 7,
                'min_unit': 1,
                'max_unit': 1,
                'step_size': 1,
            },
        }

        mock_get_stats.side_effect = self.host_stats
        drv = self._get_driver()
        pt = provider_tree.ProviderTree()
        nodename = 'fake-node'
        pt.new_root(nodename, uuids.rp_uuid)
        drv.update_provider_tree(pt, nodename)
        inv = pt.data(nodename).inventory
        # Stats must be fetched fresh (refresh=True), exactly once.
        mock_get_stats.assert_called_once_with(refresh=True)
        self.assertEqual(expected_inv, inv)
Exemplo n.º 7
0
    def test_update_provider_tree_no_vgpu(self, mock_get_stats):
        """VGPU is omitted from the inventory when there are no vGPU stats."""
        stats = self.host_stats()
        stats.update(vgpu_stats={})
        mock_get_stats.return_value = stats

        tree = provider_tree.ProviderTree()
        node = 'fake-node'
        tree.new_root(node, uuids.rp_uuid)
        self._get_driver().update_provider_tree(tree, node)

        # With empty vgpu_stats the VGPU resource class must be absent.
        self.assertNotIn(orc.VGPU, tree.data(node).inventory)
Exemplo n.º 8
0
    def test_have_aggregates_changed(self):
        """Exercise have_aggregates_changed/in_aggregates/update_aggregates.

        Covers: first-time set, no-op update (which still bumps the
        generation), and growing the aggregate list without passing a
        generation (which leaves the generation alone).
        """
        cn = self.compute_node1
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)
        rp_gen = 1

        aggregates = [
            uuids.agg1,
            uuids.agg2,
        ]
        # Before any update: "changed" is True, and the provider is a
        # member of the empty set but not of any real aggregate.
        self.assertTrue(pt.have_aggregates_changed(cn.uuid, aggregates))
        self.assertTrue(pt.in_aggregates(cn.uuid, []))
        self.assertFalse(pt.in_aggregates(cn.uuid, aggregates))
        self.assertFalse(pt.in_aggregates(cn.uuid, aggregates[:1]))
        self.assertTrue(pt.update_aggregates(cn.uuid, aggregates,
                                             generation=rp_gen))
        self.assertTrue(pt.in_aggregates(cn.uuid, aggregates))
        self.assertTrue(pt.in_aggregates(cn.uuid, aggregates[:1]))

        # data() gets the same aggregates
        cnsnap = pt.data(cn.uuid)
        self.assertFalse(
            pt.have_aggregates_changed(cn.uuid, cnsnap.aggregates))

        # Updating with the same aggregates info should return False
        self.assertFalse(pt.have_aggregates_changed(cn.uuid, aggregates))
        # But the generation should get updated
        rp_gen = 2
        self.assertFalse(pt.update_aggregates(cn.uuid, aggregates,
                                              generation=rp_gen))
        self.assertFalse(pt.have_aggregates_changed(cn.uuid, aggregates))
        self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
        self.assertTrue(pt.in_aggregates(cn.uuid, aggregates))
        self.assertTrue(pt.in_aggregates(cn.uuid, aggregates[:1]))

        # Make a change to the aggregates list
        aggregates.append(uuids.agg3)
        self.assertTrue(pt.have_aggregates_changed(cn.uuid, aggregates))
        self.assertFalse(pt.in_aggregates(cn.uuid, aggregates[-1:]))
        # Don't update the generation
        self.assertTrue(pt.update_aggregates(cn.uuid, aggregates))
        self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
        self.assertTrue(pt.in_aggregates(cn.uuid, aggregates[-1:]))
        # Previously-taken data now differs
        self.assertTrue(pt.have_aggregates_changed(cn.uuid, cnsnap.aggregates))
Exemplo n.º 9
0
    def test_populate_from_iterable_with_root_update(self):
        """Updating an already-present root must not lose its children.

        Exercises the case where a provider exists both in the tree and in
        the input: it must be replaced *before* its descendants are
        injected, or the descendants would be dropped.  Note this is not
        100% deterministic, since hash iteration order can vary.
        """
        tree = provider_tree.ProviderTree()

        # Seed the tree with a lone root.
        tree.populate_from_iterable([
            dict(uuid=uuids.root, name='root', generation=0),
        ])
        self.assertEqual([uuids.root], tree.get_provider_uuids())

        # Rename/regenerate the root while adding a child under it:
        # root
        #   +-> child1
        tree.populate_from_iterable([
            dict(uuid=uuids.root, name='root_with_new_name', generation=1),
            dict(uuid=uuids.child1, name='child1', generation=1,
                 parent_provider_uuid=uuids.root),
        ])
        self.assertEqual([uuids.root, uuids.child1],
                         tree.get_provider_uuids())
Exemplo n.º 10
0
    def test_have_traits_changed(self):
        """Exercise have_traits_changed/has_traits/update_traits.

        Covers: first-time set, no-op update (which still bumps the
        generation), and growing the trait list without passing a
        generation (which leaves the generation alone).
        """
        cn = self.compute_node1
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)
        rp_gen = 1

        traits = [
            "HW_GPU_API_DIRECT3D_V7_0",
            "HW_NIC_OFFLOAD_SG",
            "HW_CPU_X86_AVX",
        ]
        # Before any update: "changed" is True, and the provider has the
        # empty trait set but none of the real traits.
        self.assertTrue(pt.have_traits_changed(cn.uuid, traits))
        # A data-grab's traits are the same
        cnsnap = pt.data(cn.uuid)
        self.assertFalse(pt.have_traits_changed(cn.uuid, cnsnap.traits))
        self.assertTrue(pt.has_traits(cn.uuid, []))
        self.assertFalse(pt.has_traits(cn.uuid, traits))
        self.assertFalse(pt.has_traits(cn.uuid, traits[:1]))
        self.assertTrue(pt.update_traits(cn.uuid, traits, generation=rp_gen))
        self.assertTrue(pt.has_traits(cn.uuid, traits))
        self.assertTrue(pt.has_traits(cn.uuid, traits[:1]))

        # Updating with the same traits info should return False
        self.assertFalse(pt.have_traits_changed(cn.uuid, traits))
        # But the generation should get updated
        rp_gen = 2
        self.assertFalse(pt.update_traits(cn.uuid, traits, generation=rp_gen))
        self.assertFalse(pt.have_traits_changed(cn.uuid, traits))
        self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
        self.assertTrue(pt.has_traits(cn.uuid, traits))
        self.assertTrue(pt.has_traits(cn.uuid, traits[:1]))

        # Make a change to the traits list
        traits.append("HW_GPU_RESOLUTION_W800H600")
        self.assertTrue(pt.have_traits_changed(cn.uuid, traits))
        # The previously-taken data now differs
        self.assertTrue(pt.have_traits_changed(cn.uuid, cnsnap.traits))
        self.assertFalse(pt.has_traits(cn.uuid, traits[-1:]))
        # Don't update the generation
        self.assertTrue(pt.update_traits(cn.uuid, traits))
        self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
        self.assertTrue(pt.has_traits(cn.uuid, traits[-1:]))
Exemplo n.º 11
0
    def _update_provider_tree(self, allocations=None):
        """Host resource dict gets converted properly to provider tree inv.

        NOTE(review): this is a generator (it yields mid-body) --
        presumably decorated with @contextlib.contextmanager outside this
        view; confirm before calling it as a plain method.

        Yields a (ptree, exp_inv) pair so the caller can tweak both before
        the post-yield driver call and assertions run.

        :param allocations: passed straight through to
            update_provider_tree after the yield.
        """
        with mock.patch('nova.virt.powervm.host.'
                        'build_host_resource_from_ms') as mock_bhrfm:
            mock_bhrfm.return_value = {
                'vcpus': 8,
                'memory_mb': 2048,
            }
            self.drv.host_wrapper = 'host_wrapper'
            # Validate that this gets converted to int with floor
            self.drv.disk_dvr = mock.Mock(capacity=2091.8)
            exp_inv = {
                'VCPU': {
                    'total': 8,
                    'max_unit': 8,
                    'allocation_ratio': 16.0,
                    'reserved': 0,
                },
                'MEMORY_MB': {
                    'total': 2048,
                    'max_unit': 2048,
                    'allocation_ratio': 1.5,
                    'reserved': 512,
                },
                'DISK_GB': {
                    # 2091.8 floored, per the disk_dvr capacity above.
                    'total': 2091,
                    'max_unit': 2091,
                    'allocation_ratio': 1.0,
                    'reserved': 0,
                },
            }
            ptree = provider_tree.ProviderTree()
            ptree.new_root('compute_host', uuids.cn)
            # Let the caller muck with these
            yield ptree, exp_inv
            self.drv.update_provider_tree(ptree,
                                          'compute_host',
                                          allocations=allocations)
            self.assertEqual(exp_inv, ptree.data('compute_host').inventory)
            mock_bhrfm.assert_called_once_with('host_wrapper')
Exemplo n.º 12
0
    def test_update_provider_tree_failure(self, check_chas, create_inv,
                                          create_child_inv):
        """An empty provider tree causes the RP tree update to bail early."""
        # The tree has no root matching the chassis, so the driver should
        # stop before doing any per-chassis or per-system work.
        self.ptree = provider_tree.ProviderTree()

        system_collection = (
            self.RSD.driver.PODM.get_system_collection.return_value)
        chassis_collection = (
            self.RSD.driver.PODM.get_chassis_collection.return_value)
        self.RSD.update_provider_tree(self.ptree,
                                      '/redfish/v1/Chassis/Chassis1')

        # Confirm that only the top-level collection lookups happened: no
        # members were fetched and no inventory was created.
        self.RSD.driver.PODM.get_system_collection.assert_called_once()
        self.RSD.driver.PODM.get_chassis_collection.assert_called()
        chassis_collection.get_member.assert_not_called()
        check_chas.assert_not_called()
        system_collection.get_member.assert_not_called()
        create_child_inv.assert_not_called()
        create_inv.assert_not_called()
Exemplo n.º 13
0
    def test_update_provider_tree(self, call):
        """Driver converts host info into VCPU/MEMORY_MB/DISK_GB inventory."""
        call.return_value = {'vcpus': 84,
                             'disk_total': 2000,
                             'memory_mb': 78192}

        def _inv(total, ratio, reserved):
            # All three resource classes share min_unit/step_size of 1 and
            # a max_unit equal to the total.
            return {'total': total,
                    'min_unit': 1,
                    'max_unit': total,
                    'step_size': 1,
                    'allocation_ratio': ratio,
                    'reserved': reserved}

        expected_inv = {
            'VCPU': _inv(84, CONF.initial_cpu_allocation_ratio,
                         CONF.reserved_host_cpus),
            'MEMORY_MB': _inv(78192, CONF.initial_ram_allocation_ratio,
                              CONF.reserved_host_memory_mb),
            'DISK_GB': _inv(2000, CONF.initial_disk_allocation_ratio,
                            CONF.reserved_host_disk_mb),
        }

        tree = provider_tree.ProviderTree()
        nodename = 'fake-node'
        tree.new_root(nodename, uuidsentinel.rp_uuid)
        self._driver.update_provider_tree(tree, nodename)
        self.assertEqual(expected_inv, tree.data(nodename).inventory)
Exemplo n.º 14
0
    def test_update_provider_tree_success(self, check_chas, create_inv,
                                          create_child_inv):
        """Successfully updating the RP tree test."""
        chassis_path = '/redfish/v1/Chassis/Chassis1'
        system_path = '/redfish/v1/Systems/System1'

        # A tree whose (sole) root matches the chassis being updated.
        self.ptree = provider_tree.ProviderTree()
        self.ptree.new_root(chassis_path, uuids.cn)

        # Mock out the chassis collection so the chassis resolves and
        # reports one attached compute system.
        chassis_collection = (
            self.RSD.driver.PODM.get_chassis_collection.return_value)
        chassis_collection.members_identities = [chassis_path]
        chassis_collection.get_member.return_value = self.chassis_inst
        check_chas.return_value = [system_path]
        self.RSD.update_provider_tree(self.ptree, chassis_path)

        # Confirm the placement tree was updated with a child node for
        # each available compute system.
        self.RSD.driver.PODM.get_system_collection.assert_called_once()
        self.RSD.driver.PODM.get_chassis_collection.assert_called()
        chassis_collection.get_member.assert_called_with(chassis_path)
        check_chas.assert_called_with(self.chassis_inst)
        create_child_inv.assert_called_once_with(system_path)
        create_inv.assert_called_once_with(check_chas.return_value)
Exemplo n.º 15
0
    def test_update_from_provider_tree(self):
        """A "realistic" walk through the lifecycle of a compute node provider
        tree.

        Builds up a tree (roots, children, inventory, traits, aggregates),
        syncs it to placement via update_from_provider_tree, then exercises
        error scenarios (bad resource class, bad trait, generation
        conflict) and teardown.
        """
        # NOTE(efried): We can use the same ProviderTree throughout, since
        # update_from_provider_tree doesn't change it.
        new_tree = provider_tree.ProviderTree()

        def assert_ptrees_equal():
            # Verify the client's cached tree and new_tree agree on
            # membership and on every per-provider property.
            # NOTE(review): the local `uuids` set shadows the module-level
            # `uuids` sentinel inside this helper.
            uuids = set(self.client._provider_tree.get_provider_uuids())
            self.assertEqual(uuids, set(new_tree.get_provider_uuids()))
            for uuid in uuids:
                cdata = self.client._provider_tree.data(uuid)
                ndata = new_tree.data(uuid)
                self.assertEqual(ndata.name, cdata.name)
                self.assertEqual(ndata.parent_uuid, cdata.parent_uuid)
                self.assertFalse(
                    new_tree.has_inventory_changed(uuid, cdata.inventory))
                self.assertFalse(
                    new_tree.have_traits_changed(uuid, cdata.traits))
                self.assertFalse(
                    new_tree.have_aggregates_changed(uuid, cdata.aggregates))

        # To begin with, the cache should be empty
        self.assertEqual([], self.client._provider_tree.get_provider_uuids())
        # When new_tree is empty, it's a no-op.
        # Do this outside the interceptor to prove no API calls are made.
        self.client.update_from_provider_tree(self.context, new_tree)
        assert_ptrees_equal()

        with self._interceptor():
            # Populate with a provider with no inventories, aggregates, traits
            new_tree.new_root('root', uuids.root)
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Throw in some more providers, in various spots in the tree, with
            # some sub-properties
            new_tree.new_child('child1', uuids.root, uuid=uuids.child1)
            new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2])
            new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1)
            new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2'])
            new_tree.new_root('ssp', uuids.ssp)
            new_tree.update_inventory('ssp', {
                fields.ResourceClass.DISK_GB: {
                    'total': 100,
                    'reserved': 1,
                    'min_unit': 1,
                    'max_unit': 10,
                    'step_size': 2,
                    'allocation_ratio': 10.0,
                },
            })
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Swizzle properties
            # Give the root some everything
            new_tree.update_inventory(uuids.root, {
                fields.ResourceClass.VCPU: {
                    'total': 10,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 2,
                    'step_size': 1,
                    'allocation_ratio': 10.0,
                },
                fields.ResourceClass.MEMORY_MB: {
                    'total': 1048576,
                    'reserved': 2048,
                    'min_unit': 1024,
                    'max_unit': 131072,
                    'step_size': 1024,
                    'allocation_ratio': 1.0,
                },
            })
            new_tree.update_aggregates(uuids.root, [uuids.agg1])
            new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                'HW_CPU_X86_AVX2'])
            # Take away the child's aggregates
            new_tree.update_aggregates(uuids.child1, [])
            # Grandchild gets some inventory
            ipv4_inv = {
                fields.ResourceClass.IPV4_ADDRESS: {
                    'total': 128,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 8,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            new_tree.update_inventory('grandchild1_1', ipv4_inv)
            # Shared storage provider gets traits
            new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE',
                                               'STORAGE_DISK_SSD']))
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Let's go for some error scenarios.
            # Add inventory in an invalid resource class
            # (MOTSUC_* is neither standard nor CUSTOM_-prefixed)
            new_tree.update_inventory(
                'grandchild1_1',
                dict(ipv4_inv,
                     MOTSUC_BANDWIDTH={
                         'total': 1250000,
                         'reserved': 10000,
                         'min_unit': 5000,
                         'max_unit': 250000,
                         'step_size': 5000,
                         'allocation_ratio': 8.0,
                     }))
            self.assertRaises(
                exception.ResourceProviderSyncFailed,
                self.client.update_from_provider_tree, self.context, new_tree)
            # The inventory update didn't get synced...
            self.assertIsNone(self.client._get_inventory(
                self.context, uuids.grandchild1_1))
            # ...and the grandchild was removed from the cache
            self.assertFalse(
                self.client._provider_tree.exists('grandchild1_1'))

            # Fix that problem so we can try the next one
            new_tree.update_inventory(
                'grandchild1_1',
                dict(ipv4_inv,
                     CUSTOM_BANDWIDTH={
                         'total': 1250000,
                         'reserved': 10000,
                         'min_unit': 5000,
                         'max_unit': 250000,
                         'step_size': 5000,
                         'allocation_ratio': 8.0,
                     }))

            # Add a bogus trait
            new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                'HW_CPU_X86_AVX2',
                                                'MOTSUC_FOO'])
            self.assertRaises(
                exception.ResourceProviderSyncFailed,
                self.client.update_from_provider_tree, self.context, new_tree)
            # Placement didn't get updated
            self.assertEqual(set(['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2']),
                             self.client._get_provider_traits(self.context,
                                                              uuids.root))
            # ...and the root was removed from the cache
            self.assertFalse(self.client._provider_tree.exists(uuids.root))

            # Fix that problem
            new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                'HW_CPU_X86_AVX2',
                                                'CUSTOM_FOO'])

            # Now the sync should work
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Let's cause a conflict error by doing an "out-of-band" update
            # directly against the placement API, bumping the generation.
            gen = self.client._provider_tree.data(uuids.ssp).generation
            self.assertTrue(self.client.put(
                '/resource_providers/%s/traits' % uuids.ssp,
                {'resource_provider_generation': gen,
                 'traits': ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_HDD']},
                version='1.6'))

            # Now if we try to modify the traits, we should fail and invalidate
            # the cache...
            new_tree.update_traits(uuids.ssp, ['MISC_SHARES_VIA_AGGREGATE',
                                               'STORAGE_DISK_SSD',
                                               'CUSTOM_FAST'])
            self.assertRaises(
                exception.ResourceProviderSyncFailed,
                self.client.update_from_provider_tree, self.context, new_tree)
            # ...but the next iteration will refresh the cache with the latest
            # generation and so the next attempt should succeed.
            self.client.update_from_provider_tree(self.context, new_tree)
            # The out-of-band change is blown away, as it should be.
            assert_ptrees_equal()

            # Let's delete some stuff
            new_tree.remove(uuids.ssp)
            self.assertFalse(new_tree.exists('ssp'))
            new_tree.remove('child1')
            self.assertFalse(new_tree.exists('child1'))
            # Removing a node removes its descendants too
            self.assertFalse(new_tree.exists('grandchild1_1'))
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Remove the last provider
            new_tree.remove(uuids.root)
            self.assertEqual([], new_tree.get_provider_uuids())
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Having removed the providers this way, they ought to be gone
            # from placement
            for uuid in (uuids.root, uuids.child1, uuids.grandchild1_1,
                         uuids.ssp):
                resp = self.client.get('/resource_providers/%s' % uuid)
                self.assertEqual(404, resp.status_code)
Exemplo n.º 16
0
    def setUp(self, mock_connector, pod_conn):
        """Initial setup of mocks for all of the unit tests.

        Loads JSON fixtures for each RSD/redfish object type (chassis,
        system, node, and their collections), builds the driver under
        test, and creates fake flavors/instances plus an empty provider
        tree.  Each fixture load rebinds the shared mocked connection's
        json() return value, so the construction order below matters.
        """
        super(TestRSDDriver, self).setUp()
        # Mock out the connection to the RSD redfish API
        self.root_conn = mock.MagicMock()
        mock_connector.return_value = self.root_conn

        # Create sample collections and instances of Chassis/System/Nodes
        with open('rsd_virt_for_nova/tests/json_samples/root.json', 'r') as f:
            self.root_conn.get.return_value.json.return_value = json.loads(
                f.read())
        self.rsd = rsd_lib.main.RSDLib('http://foo.bar:8442',
                                       username='******',
                                       password='******',
                                       verify=False).factory()

        with open('rsd_virt_for_nova/tests/json_samples/chassis_col.json',
                  'r') as f:
            self.root_conn.get.return_value.json.return_value = json.loads(
                f.read())
        self.chassis_col = chassis.ChassisCollection(self.root_conn,
                                                     '/redfish/v1/Chassis',
                                                     redfish_version='1.0.2')

        with open('rsd_virt_for_nova/tests/json_samples/chassis.json',
                  'r') as f:
            self.root_conn.get.return_value.json.return_value = json.loads(
                f.read())

        self.chassis_inst = chassis.Chassis(self.root_conn,
                                            '/redfish/v1/Chassis/Chassis1',
                                            redfish_version='1.0.2')

        with open('rsd_virt_for_nova/tests/json_samples/node_col.json',
                  'r') as f:
            self.root_conn.get.return_value.json.return_value = json.loads(
                f.read())
        self.node_collection = node.NodeCollection(self.root_conn,
                                                   '/redfish/v1/Nodes',
                                                   redfish_version='1.0.2')

        with open('rsd_virt_for_nova/tests/json_samples/node.json', 'r') as f:
            self.root_conn.get.return_value.json.return_value = json.loads(
                f.read())
        self.node_inst = node.Node(self.root_conn,
                                   '/redfish/v1/Nodes/Node1',
                                   redfish_version='1.0.2')

        # Same node URI, but an "assembled" fixture variant.
        with open('rsd_virt_for_nova/tests/json_samples/node_assembled.json',
                  'r') as f:
            self.root_conn.get.return_value.json.return_value = json.loads(
                f.read())
        self.node_ass_inst = node.Node(self.root_conn,
                                       '/redfish/v1/Nodes/Node1',
                                       redfish_version='1.0.2')

        with open('rsd_virt_for_nova/tests/json_samples/sys_collection.json',
                  'r') as f:
            self.root_conn.get.return_value.json.return_value = \
                json.loads(f.read())
        self.system_col = system.SystemCollection(self.root_conn,
                                                  '/redfish/v1/Systems',
                                                  redfish_version='1.0.2')

        with open('rsd_virt_for_nova/tests/json_samples/system.json',
                  'r') as f:
            self.root_conn.get.return_value.json.return_value = json.loads(
                f.read())
        self.system_inst = system.System(self.root_conn,
                                         '/redfish/v1/Systems/System1',
                                         redfish_version='1.0.2')

        # Mock out a fake virt driver and its dependencies/parameters
        self.RSD = driver.RSDDriver(fake.FakeVirtAPI())

        # Create Fake flavors and instances
        gb = self.system_inst.memory_summary.size_gib
        mem = self.RSD.conv_GiB_to_MiB(gb)
        proc = self.system_inst.processors.summary.count
        # Flavor id of the form "<mem>MB-<proc>vcpus".
        flav_id = str(mem) + 'MB-' + str(proc) + 'vcpus'
        res = fields.ResourceClass.normalize_name(self.system_inst.identity)
        spec = 'resources:' + res
        # Mock out some instances for testing
        self.flavor = FakeFlavor(gb, mem, str('RSD.' + flav_id),
                                 self.system_inst.identity, spec)
        self.inst1 = FakeInstance('inst1', power_state.RUNNING, 'inst1id',
                                  self.flavor)
        self.invalid_inst = FakeInstance('inv_inst', power_state.RUNNING,
                                         'inv_inst_id', self.flavor)
        # Only inst1 is "known" to the driver; invalid_inst is not.
        self.RSD.instances = {self.inst1.uuid: self.inst1}

        # A provider tree for testing on the placement API
        self.ptree = provider_tree.ProviderTree()

        self.test_image_meta = {
            "disk_format": "raw",
        }
Exemplo n.º 17
0
    def test_has_inventory_changed(self):
        """Exercise inventory change detection on a ProviderTree.

        Covers: a brand-new record counts as a change; resubmitting an
        identical record does not; a data() snapshot compares equal to the
        tree's copy until the tree is updated; dropping a single field from
        a resource record is ignored, but dropping a whole resource class
        is a change.
        """
        compute = self.compute_node1
        tree = provider_tree.ProviderTree(self.compute_nodes)
        generation = 1

        inventory = {
            'VCPU': {
                'total': 8,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 8,
                'step_size': 1,
                'allocation_ratio': 16.0,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': 512,
                'min_unit': 64,
                'max_unit': 1024,
                'step_size': 64,
                'allocation_ratio': 1.5,
            },
            'DISK_GB': {
                'total': 1000,
                'reserved': 100,
                'min_unit': 10,
                'max_unit': 1000,
                'step_size': 10,
                'allocation_ratio': 1.0,
            },
        }
        # The provider starts with no inventory, so this is a change.
        self.assertTrue(tree.has_inventory_changed(compute.uuid, inventory))
        self.assertTrue(
            tree.update_inventory(compute.uuid, inventory, generation))

        # Resubmitting identical inventory is not a change.
        self.assertFalse(tree.has_inventory_changed(compute.uuid, inventory))
        self.assertFalse(
            tree.update_inventory(compute.uuid, inventory, generation))

        # A data() snapshot's inventory compares "equal" to the original.
        snapshot = tree.data(compute.uuid)
        self.assertFalse(
            tree.has_inventory_changed(compute.uuid, snapshot.inventory))

        inventory['VCPU']['total'] = 6
        self.assertTrue(tree.has_inventory_changed(compute.uuid, inventory))
        self.assertTrue(
            tree.update_inventory(compute.uuid, inventory, generation))

        # The snapshot kept the old values, so it now differs from the
        # tree's (updated) copy.
        self.assertTrue(
            tree.has_inventory_changed(compute.uuid, snapshot.inventory))

        self.assertFalse(tree.has_inventory_changed(compute.uuid, inventory))
        self.assertFalse(
            tree.update_inventory(compute.uuid, inventory, generation))

        # Deleting a single field in the new record should NOT register as
        # a change...
        del inventory['VCPU']['allocation_ratio']
        self.assertFalse(tree.has_inventory_changed(compute.uuid, inventory))
        self.assertFalse(
            tree.update_inventory(compute.uuid, inventory, generation))

        # ...but deleting an entire resource class should.
        del inventory['MEMORY_MB']
        self.assertTrue(tree.has_inventory_changed(compute.uuid, inventory))
        self.assertTrue(
            tree.update_inventory(compute.uuid, inventory, generation))
Exemplo n.º 18
0
    def test_tree_ops(self):
        """Exercise the basic ProviderTree surface: duplicate-root rejection,
        exists() by UUID and by name, get_provider_uuids() scoping,
        new_child()/new_root(), read-only data() snapshots, and remove()
        cascading to descendants.

        NOTE(review): a method with this same name is defined again later in
        this file; if both live in the same test class, the later definition
        shadows this one and this test never runs -- confirm.
        """
        cn1 = self.compute_node1
        cn2 = self.compute_node2
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)

        # Re-adding an existing provider as a new root must fail.
        self.assertRaises(
            ValueError,
            pt.new_root,
            cn1.hypervisor_hostname,
            cn1.uuid,
            1,
        )

        # exists() accepts either a UUID or a provider name.
        self.assertTrue(pt.exists(cn1.uuid))
        self.assertTrue(pt.exists(cn1.hypervisor_hostname))
        self.assertFalse(pt.exists(uuids.non_existing_rp))
        self.assertFalse(pt.exists('noexist'))

        # Scoped to cn1's subtree vs. the whole tree.
        self.assertEqual(set([cn1.uuid]),
                         pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(set([cn1.uuid, cn2.uuid]), pt.get_provider_uuids())

        numa_cell0_uuid = pt.new_child('numa_cell0', cn1.uuid)
        numa_cell1_uuid = pt.new_child('numa_cell1', cn1.uuid)

        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists('numa_cell0'))

        self.assertTrue(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists('numa_cell1'))

        pf1_cell0_uuid = pt.new_child('pf1_cell0', numa_cell0_uuid)
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists('pf1_cell0'))

        # Now we've got a 3-level tree under cn1 - check provider UUIDs again
        self.assertEqual(
            set([cn1.uuid, numa_cell0_uuid, pf1_cell0_uuid, numa_cell1_uuid]),
            pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(
            set([cn1.uuid, cn2.uuid, numa_cell0_uuid, pf1_cell0_uuid,
                 numa_cell1_uuid]),
            pt.get_provider_uuids())

        # Adding a child under a nonexistent parent must fail.
        self.assertRaises(
            ValueError,
            pt.new_child,
            'pf1_cell0',
            uuids.non_existing_rp,
        )

        # Test data().
        # Root, by UUID
        cn1_snap = pt.data(cn1.uuid)
        # Fields were faithfully copied
        self.assertEqual(cn1.uuid, cn1_snap.uuid)
        self.assertEqual(cn1.hypervisor_hostname, cn1_snap.name)
        self.assertIsNone(cn1_snap.parent_uuid)
        self.assertEqual({}, cn1_snap.inventory)
        self.assertEqual(set(), cn1_snap.traits)
        self.assertEqual(set(), cn1_snap.aggregates)
        # Validate read-only-ness
        self.assertRaises(AttributeError, setattr, cn1_snap, 'name', 'foo')

        # A compute node not yet in the tree can be added as a new root.
        cn3 = objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='compute-node-3',
        )
        self.assertFalse(pt.exists(cn3.uuid))
        self.assertFalse(pt.exists(cn3.hypervisor_hostname))
        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)

        self.assertTrue(pt.exists(cn3.uuid))
        self.assertTrue(pt.exists(cn3.hypervisor_hostname))

        # ...but adding it twice must fail.
        self.assertRaises(
            ValueError,
            pt.new_root,
            cn3.hypervisor_hostname,
            cn3.uuid,
            1,
        )

        # Removing an unknown provider must fail.
        self.assertRaises(
            ValueError,
            pt.remove,
            uuids.non_existing_rp,
        )

        # Removing a leaf only removes that leaf.
        pt.remove(numa_cell1_uuid)
        self.assertFalse(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists(uuids.cn1))

        # Now remove the root and check that children no longer exist
        pt.remove(uuids.cn1)
        self.assertFalse(pt.exists(pf1_cell0_uuid))
        self.assertFalse(pt.exists(numa_cell0_uuid))
        self.assertFalse(pt.exists(uuids.cn1))
    def test_tree_ops(self):
        """Exercise ProviderTree operations in the API variant where
        new_child() returns a provider object (with a .uuid attribute) and
        find() looks providers up by name or UUID.

        NOTE(review): this method shares its name with other test_tree_ops
        definitions in this file; if they live in the same test class, only
        the last definition actually runs -- confirm.
        """
        cn1 = self.compute_node1
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)

        # Re-adding an existing provider as a new root must fail.
        self.assertRaises(
            ValueError,
            pt.new_root,
            cn1.hypervisor_hostname,
            cn1.uuid,
            1,
        )

        # exists() accepts either a UUID or a provider name.
        self.assertTrue(pt.exists(cn1.uuid))
        self.assertTrue(pt.exists(cn1.hypervisor_hostname))
        self.assertFalse(pt.exists(uuids.non_existing_rp))
        self.assertFalse(pt.exists('noexist'))

        numa_cell0 = pt.new_child('numa_cell0', cn1.uuid)
        numa_cell1 = pt.new_child('numa_cell1', cn1.uuid)

        # find() locates the same object by name or by UUID.
        self.assertEqual(numa_cell0, pt.find('numa_cell0'))
        self.assertEqual(numa_cell0, pt.find(numa_cell0.uuid))

        self.assertTrue(pt.exists(numa_cell0.uuid))
        self.assertTrue(pt.exists('numa_cell0'))

        self.assertTrue(pt.exists(numa_cell1.uuid))
        self.assertTrue(pt.exists('numa_cell1'))

        pf1_cell0 = pt.new_child('pf1_cell0', numa_cell0.uuid)
        self.assertTrue(pt.exists(pf1_cell0.uuid))
        self.assertTrue(pt.exists('pf1_cell0'))

        # Adding a child under a nonexistent parent must fail.
        self.assertRaises(
            ValueError,
            pt.new_child,
            'pf1_cell0',
            uuids.non_existing_rp,
        )

        # A compute node not yet in the tree can be added as a new root.
        cn3 = objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='compute-node-3',
        )
        self.assertFalse(pt.exists(cn3.uuid))
        self.assertFalse(pt.exists(cn3.hypervisor_hostname))
        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)

        self.assertTrue(pt.exists(cn3.uuid))
        self.assertTrue(pt.exists(cn3.hypervisor_hostname))

        # ...but adding it twice must fail.
        self.assertRaises(
            ValueError,
            pt.new_root,
            cn3.hypervisor_hostname,
            cn3.uuid,
            1,
        )

        # Removing an unknown provider must fail.
        self.assertRaises(
            ValueError,
            pt.remove,
            uuids.non_existing_rp,
        )

        # Save the provider uuids first: removing a provider may invalidate
        # the provider objects returned by new_child() above.
        cell0_uuid = numa_cell0.uuid
        cell1_uuid = numa_cell1.uuid
        pf1_uuid = pf1_cell0.uuid

        # Removing a leaf only removes that leaf.
        pt.remove(cell1_uuid)
        self.assertFalse(pt.exists(cell1_uuid))
        self.assertTrue(pt.exists(pf1_uuid))
        self.assertTrue(pt.exists(cell0_uuid))
        self.assertTrue(pt.exists(uuids.cn1))

        # Now remove the root and check that children no longer exist
        pt.remove(uuids.cn1)
        self.assertFalse(pt.exists(pf1_uuid))
        self.assertFalse(pt.exists(cell0_uuid))
        self.assertFalse(pt.exists(uuids.cn1))
Exemplo n.º 20
0
 def test_update_aggregates_no_existing_rp(self):
     """update_aggregates() on an unknown provider raises ValueError."""
     tree = provider_tree.ProviderTree(self.compute_nodes)
     self.assertRaises(
         ValueError, tree.update_aggregates, uuids.non_existing_rp, [])
Exemplo n.º 21
0
 def _pt_with_cns(self):
     """Build a ProviderTree holding one root per compute-node fixture.

     Each root is created at generation 0, named after the node's
     hypervisor hostname and keyed by its uuid.
     """
     tree = provider_tree.ProviderTree()
     for node in self.compute_nodes:
         tree.new_root(node.hypervisor_hostname, node.uuid, generation=0)
     return tree
Exemplo n.º 22
0
    def test_populate_from_iterable_complex(self):
        """populate_from_iterable() builds a multi-root, multi-level tree and
        merges later lists non-destructively: orphans are rejected without
        altering the tree, empty lists are a no-op, and get_provider_uuids()
        always orders a child after its parent.
        """
        # root
        #   +-> child1
        #   |      +-> grandchild1_2
        #   |             +-> ggc1_2_1
        #   |             +-> ggc1_2_2
        #   |             +-> ggc1_2_3
        #   +-> child2
        # another_root
        pt = provider_tree.ProviderTree()
        plist = [
            {
                'uuid': uuids.root,
                'name': 'root',
                'generation': 0,
            },
            {
                'uuid': uuids.child1,
                'name': 'child1',
                'generation': 1,
                'parent_provider_uuid': uuids.root,
            },
            {
                'uuid': uuids.child2,
                'name': 'child2',
                'generation': 2,
                'parent_provider_uuid': uuids.root,
            },
            {
                'uuid': uuids.grandchild1_2,
                'name': 'grandchild1_2',
                'generation': 12,
                'parent_provider_uuid': uuids.child1,
            },
            {
                'uuid': uuids.ggc1_2_1,
                'name': 'ggc1_2_1',
                'generation': 121,
                'parent_provider_uuid': uuids.grandchild1_2,
            },
            {
                'uuid': uuids.ggc1_2_2,
                'name': 'ggc1_2_2',
                'generation': 122,
                'parent_provider_uuid': uuids.grandchild1_2,
            },
            {
                'uuid': uuids.ggc1_2_3,
                'name': 'ggc1_2_3',
                'generation': 123,
                'parent_provider_uuid': uuids.grandchild1_2,
            },
            {
                'uuid': uuids.another_root,
                'name': 'another_root',
                'generation': 911,
            },
        ]
        pt.populate_from_iterable(plist)

        # Checker reused after each merge: verifies membership and the
        # parent/child hierarchy; tolerates the optional providers that the
        # later merges add (grandchild1_1, new_root).
        def validate_root(expected_uuids):
            # Make sure we have all and only the expected providers
            self.assertEqual(expected_uuids, set(pt.get_provider_uuids()))
            # Now make sure they're in the right hierarchy.  Cheat: get the
            # actual _Provider to make it easier to walk the tree (ProviderData
            # doesn't include children).
            root = pt._find_with_lock(uuids.root)
            self.assertEqual(uuids.root, root.uuid)
            self.assertEqual('root', root.name)
            self.assertEqual(0, root.generation)
            self.assertIsNone(root.parent_uuid)
            self.assertEqual(2, len(list(root.children)))
            for child in root.children.values():
                self.assertTrue(child.name.startswith('child'))
                if child.name == 'child1':
                    if uuids.grandchild1_1 in expected_uuids:
                        self.assertEqual(2, len(list(child.children)))
                    else:
                        self.assertEqual(1, len(list(child.children)))
                    for grandchild in child.children.values():
                        self.assertTrue(
                            grandchild.name.startswith('grandchild1_'))
                        if grandchild.name == 'grandchild1_1':
                            self.assertEqual(0, len(list(grandchild.children)))
                        if grandchild.name == 'grandchild1_2':
                            self.assertEqual(3, len(list(grandchild.children)))
                            for ggc in grandchild.children.values():
                                self.assertTrue(ggc.name.startswith('ggc1_2_'))
            another_root = pt._find_with_lock(uuids.another_root)
            self.assertEqual(uuids.another_root, another_root.uuid)
            self.assertEqual('another_root', another_root.name)
            self.assertEqual(911, another_root.generation)
            self.assertIsNone(another_root.parent_uuid)
            self.assertEqual(0, len(list(another_root.children)))
            if uuids.new_root in expected_uuids:
                new_root = pt._find_with_lock(uuids.new_root)
                self.assertEqual(uuids.new_root, new_root.uuid)
                self.assertEqual('new_root', new_root.name)
                self.assertEqual(42, new_root.generation)
                self.assertIsNone(new_root.parent_uuid)
                self.assertEqual(0, len(list(new_root.children)))

        expected_uuids = set([
            uuids.root, uuids.child1, uuids.child2, uuids.grandchild1_2,
            uuids.ggc1_2_1, uuids.ggc1_2_2, uuids.ggc1_2_3, uuids.another_root
        ])

        validate_root(expected_uuids)

        # Merge an orphan - still an error
        orphan = {
            'uuid': uuids.orphan,
            'name': 'orphan',
            'generation': 86,
            'parent_provider_uuid': uuids.mystery,
        }
        self.assertRaises(ValueError, pt.populate_from_iterable, [orphan])

        # And the tree didn't change
        validate_root(expected_uuids)

        # Merge a list with a new grandchild and a new root
        plist = [
            {
                'uuid': uuids.grandchild1_1,
                'name': 'grandchild1_1',
                'generation': 11,
                'parent_provider_uuid': uuids.child1,
            },
            {
                'uuid': uuids.new_root,
                'name': 'new_root',
                'generation': 42,
            },
        ]
        pt.populate_from_iterable(plist)

        expected_uuids |= set([uuids.grandchild1_1, uuids.new_root])

        validate_root(expected_uuids)

        # Merge an empty list - still a no-op
        pt.populate_from_iterable([])
        validate_root(expected_uuids)

        # Since we have a complex tree, test the ordering of get_provider_uuids
        # We can't predict the order of siblings, or where nephews will appear
        # relative to their uncles, but we can guarantee that any given child
        # always comes after its parent (and by extension, its ancestors too).
        puuids = pt.get_provider_uuids()
        for desc in (uuids.child1, uuids.child2):
            self.assertGreater(puuids.index(desc), puuids.index(uuids.root))
        for desc in (uuids.grandchild1_1, uuids.grandchild1_2):
            self.assertGreater(puuids.index(desc), puuids.index(uuids.child1))
        for desc in (uuids.ggc1_2_1, uuids.ggc1_2_2, uuids.ggc1_2_3):
            self.assertGreater(puuids.index(desc),
                               puuids.index(uuids.grandchild1_2))
Exemplo n.º 23
0
    def test_tree_ops(self):
        """Exercise basic ProviderTree operations: duplicate-root rejection,
        exists() by UUID and by name, get_provider_uuids() scoping,
        new_child()/new_root(), and remove() cascading to descendants.

        NOTE(review): this method shares its name with earlier test_tree_ops
        definitions in this file; if they live in the same test class, only
        the last definition actually runs -- confirm.
        """
        cn1 = self.compute_node1
        cn2 = self.compute_node2
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)

        # Re-adding an existing provider as a new root must fail.
        self.assertRaises(
            ValueError,
            pt.new_root,
            cn1.hypervisor_hostname,
            cn1.uuid,
            1,
        )

        # exists() accepts either a UUID or a provider name.
        self.assertTrue(pt.exists(cn1.uuid))
        self.assertTrue(pt.exists(cn1.hypervisor_hostname))
        self.assertFalse(pt.exists(uuids.non_existing_rp))
        self.assertFalse(pt.exists('noexist'))

        # Scoped to cn1's subtree vs. the whole tree.
        self.assertEqual(set([cn1.uuid]),
                         pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(set([cn1.uuid, cn2.uuid]), pt.get_provider_uuids())

        numa_cell0_uuid = pt.new_child('numa_cell0', cn1.uuid)
        numa_cell1_uuid = pt.new_child('numa_cell1', cn1.uuid)

        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists('numa_cell0'))

        self.assertTrue(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists('numa_cell1'))

        pf1_cell0_uuid = pt.new_child('pf1_cell0', numa_cell0_uuid)
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists('pf1_cell0'))

        # Now we've got a 3-level tree under cn1 - check provider UUIDs again
        self.assertEqual(
            set([cn1.uuid, numa_cell0_uuid, pf1_cell0_uuid, numa_cell1_uuid]),
            pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(
            set([
                cn1.uuid, cn2.uuid, numa_cell0_uuid, pf1_cell0_uuid,
                numa_cell1_uuid
            ]), pt.get_provider_uuids())

        # Adding a child under a nonexistent parent must fail.
        self.assertRaises(
            ValueError,
            pt.new_child,
            'pf1_cell0',
            uuids.non_existing_rp,
        )

        # A compute node not yet in the tree can be added as a new root.
        cn3 = objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='compute-node-3',
        )
        self.assertFalse(pt.exists(cn3.uuid))
        self.assertFalse(pt.exists(cn3.hypervisor_hostname))
        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)

        self.assertTrue(pt.exists(cn3.uuid))
        self.assertTrue(pt.exists(cn3.hypervisor_hostname))

        # ...but adding it twice must fail.
        self.assertRaises(
            ValueError,
            pt.new_root,
            cn3.hypervisor_hostname,
            cn3.uuid,
            1,
        )

        # Removing an unknown provider must fail.
        self.assertRaises(
            ValueError,
            pt.remove,
            uuids.non_existing_rp,
        )

        # Removing a leaf only removes that leaf.
        pt.remove(numa_cell1_uuid)
        self.assertFalse(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists(uuids.cn1))

        # Now remove the root and check that children no longer exist
        pt.remove(uuids.cn1)
        self.assertFalse(pt.exists(pf1_cell0_uuid))
        self.assertFalse(pt.exists(numa_cell0_uuid))
        self.assertFalse(pt.exists(uuids.cn1))
Exemplo n.º 24
0
 def test_populate_from_iterable_empty(self):
     """Populating a fresh tree from an empty iterable leaves it empty."""
     tree = provider_tree.ProviderTree()
     # Feeding no providers must be a no-op.
     tree.populate_from_iterable([])
     self.assertEqual([], tree.get_provider_uuids())