Example #1
    def test_tree_ops(self):
        cn1 = self.compute_node1
        cn2 = self.compute_node2
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn1.hypervisor_hostname,
            cn1.uuid,
            1,
        )

        self.assertTrue(pt.exists(cn1.uuid))
        self.assertTrue(pt.exists(cn1.hypervisor_hostname))
        self.assertFalse(pt.exists(uuids.non_existing_rp))
        self.assertFalse(pt.exists('noexist'))

        self.assertEqual(set([cn1.uuid]),
                         pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(set([cn1.uuid, cn2.uuid]), pt.get_provider_uuids())

        numa_cell0_uuid = pt.new_child('numa_cell0', cn1.uuid)
        numa_cell1_uuid = pt.new_child('numa_cell1', cn1.uuid)

        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists('numa_cell0'))

        self.assertTrue(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists('numa_cell1'))

        pf1_cell0_uuid = pt.new_child('pf1_cell0', numa_cell0_uuid)
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists('pf1_cell0'))

        # Now we've got a 3-level tree under cn1 - check provider UUIDs again
        self.assertEqual(
            set([cn1.uuid, numa_cell0_uuid, pf1_cell0_uuid, numa_cell1_uuid]),
            pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(
            set([cn1.uuid, cn2.uuid, numa_cell0_uuid, pf1_cell0_uuid,
                 numa_cell1_uuid]),
            pt.get_provider_uuids())

        self.assertRaises(
            ValueError,
            pt.new_child,
            'pf1_cell0',
            uuids.non_existing_rp,
        )

        # Test data().
        # Root, by UUID
        cn1_snap = pt.data(cn1.uuid)
        # Fields were faithfully copied
        self.assertEqual(cn1.uuid, cn1_snap.uuid)
        self.assertEqual(cn1.hypervisor_hostname, cn1_snap.name)
        self.assertIsNone(cn1_snap.parent_uuid)
        self.assertEqual({}, cn1_snap.inventory)
        self.assertEqual(set(), cn1_snap.traits)
        self.assertEqual(set(), cn1_snap.aggregates)
        # Validate read-only-ness
        self.assertRaises(AttributeError, setattr, cn1_snap, 'name', 'foo')

        cn3 = objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='compute-node-3',
        )
        self.assertFalse(pt.exists(cn3.uuid))
        self.assertFalse(pt.exists(cn3.hypervisor_hostname))
        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)

        self.assertTrue(pt.exists(cn3.uuid))
        self.assertTrue(pt.exists(cn3.hypervisor_hostname))

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn3.hypervisor_hostname,
            cn3.uuid,
            1,
        )

        self.assertRaises(
            ValueError,
            pt.remove,
            uuids.non_existing_rp,
        )

        pt.remove(numa_cell1_uuid)
        self.assertFalse(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists(uuids.cn1))

        # Now remove the root and check that children no longer exist
        pt.remove(uuids.cn1)
        self.assertFalse(pt.exists(pf1_cell0_uuid))
        self.assertFalse(pt.exists(numa_cell0_uuid))
        self.assertFalse(pt.exists(uuids.cn1))
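A note on the uuids helper used throughout these examples: it is oslo_utils.fixture.uuidsentinel (imported in Nova tests as uuids), which lazily generates one stable UUID string per attribute name, so identifiers such as uuids.cn1 and uuids.non_existing_rp stay consistent within a test run. A minimal standalone sketch:

from oslo_utils.fixture import uuidsentinel as uuids

# Attribute access creates a UUID string on first use and caches it by name,
# so repeated references to the same attribute compare equal.
assert uuids.cn1 == uuids.cn1
assert uuids.cn1 != uuids.cn2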
Example #2
    def test_execute_with_destination(self, mock_get_az):
        dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
        with test.nested(
            mock.patch.object(self.task, '_check_host_is_up'),
            mock.patch.object(self.task, '_check_requested_destination'),
            mock.patch.object(scheduler_utils,
                              'claim_resources_on_destination'),
            mock.patch.object(self.migration, 'save'),
            mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
            mock.patch('nova.conductor.tasks.migrate.'
                       'replace_allocation_with_migration'),
            mock.patch.object(self.task, '_check_destination_is_not_source'),
            mock.patch.object(self.task,
                              '_check_destination_has_enough_memory'),
            mock.patch.object(self.task,
                              '_check_compatible_with_source_hypervisor',
                              return_value=(mock.sentinel.source_node,
                                            dest_node)),
        ) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig,
              m_alloc, m_check_diff, m_check_enough_mem, m_check_compatible):
            mock_mig.return_value = "bob"
            m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)

            self.assertEqual("bob", self.task.execute())
            mock_check_up.assert_has_calls([
                mock.call(self.instance_host), mock.call(self.destination)])
            mock_check_dest.assert_called_once_with()
            m_check_diff.assert_called_once()
            m_check_enough_mem.assert_called_once()
            m_check_compatible.assert_called_once()
            allocs = mock.sentinel.allocs
            mock_claim.assert_called_once_with(
                self.context, self.task.report_client,
                self.instance, mock.sentinel.source_node, dest_node,
                source_allocations=allocs, consumer_generation=None)
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest=self.destination,
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            mock_get_az.assert_called_once_with(self.context, self.destination)
            self.assertEqual('fake-az', self.instance.availability_zone)
            # make sure the source/dest fields were set on the migration object
            self.assertEqual(self.instance.node, self.migration.source_node)
            self.assertEqual(dest_node.hypervisor_hostname,
                             self.migration.dest_node)
            self.assertEqual(self.task.destination,
                             self.migration.dest_compute)
            m_alloc.assert_called_once_with(self.context,
                                            self.instance,
                                            self.migration)
        # When the task is executed with a destination it means the host is
        # being forced and we don't call the scheduler, so we don't need to
        # heal the request spec.
        self.heal_reqspec_is_bfv_mock.assert_not_called()

        # When the task is executed with a destination it means the host is
        # being forced and we don't call the scheduler, so we don't need to
        # modify the request spec
        self.ensure_network_metadata_mock.assert_not_called()
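The test.nested() helper used above comes from nova.test and enters several mock.patch context managers in a single with statement, yielding the entered mocks as a tuple. Outside Nova, roughly the same behaviour can be approximated with contextlib.ExitStack; a minimal sketch (the patch targets are illustrative, not taken from the test above):

import contextlib
from unittest import mock


@contextlib.contextmanager
def nested(*managers):
    # Enter each manager in order, yield the entered objects as a tuple,
    # and exit them in reverse order on the way out.
    with contextlib.ExitStack() as stack:
        yield tuple(stack.enter_context(m) for m in managers)


with nested(
    mock.patch('os.path.exists', return_value=True),
    mock.patch('os.remove'),
) as (mock_exists, mock_remove):
    assert mock_exists('/anything') is True
    mock_remove.assert_not_called()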
Example #3
    def test_tree_ops(self):
        cn1 = self.compute_node1
        cn2 = self.compute_node2
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn1.hypervisor_hostname,
            cn1.uuid,
            1,
        )

        self.assertTrue(pt.exists(cn1.uuid))
        self.assertTrue(pt.exists(cn1.hypervisor_hostname))
        self.assertFalse(pt.exists(uuids.non_existing_rp))
        self.assertFalse(pt.exists('noexist'))

        self.assertEqual(set([cn1.uuid]),
                         pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(set([cn1.uuid, cn2.uuid]), pt.get_provider_uuids())

        numa_cell0_uuid = pt.new_child('numa_cell0', cn1.uuid)
        numa_cell1_uuid = pt.new_child('numa_cell1', cn1.uuid)

        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists('numa_cell0'))

        self.assertTrue(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists('numa_cell1'))

        pf1_cell0_uuid = pt.new_child('pf1_cell0', numa_cell0_uuid)
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists('pf1_cell0'))

        # Now we've got a 3-level tree under cn1 - check provider UUIDs again
        self.assertEqual(
            set([cn1.uuid, numa_cell0_uuid, pf1_cell0_uuid, numa_cell1_uuid]),
            pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        self.assertEqual(
            set([
                cn1.uuid, cn2.uuid, numa_cell0_uuid, pf1_cell0_uuid,
                numa_cell1_uuid
            ]), pt.get_provider_uuids())

        self.assertRaises(
            ValueError,
            pt.new_child,
            'pf1_cell0',
            uuids.non_existing_rp,
        )

        cn3 = objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='compute-node-3',
        )
        self.assertFalse(pt.exists(cn3.uuid))
        self.assertFalse(pt.exists(cn3.hypervisor_hostname))
        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)

        self.assertTrue(pt.exists(cn3.uuid))
        self.assertTrue(pt.exists(cn3.hypervisor_hostname))

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn3.hypervisor_hostname,
            cn3.uuid,
            1,
        )

        self.assertRaises(
            ValueError,
            pt.remove,
            uuids.non_existing_rp,
        )

        pt.remove(numa_cell1_uuid)
        self.assertFalse(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists(uuids.cn1))

        # Now remove the root and check that children no longer exist
        pt.remove(uuids.cn1)
        self.assertFalse(pt.exists(pf1_cell0_uuid))
        self.assertFalse(pt.exists(numa_cell0_uuid))
        self.assertFalse(pt.exists(uuids.cn1))
Example #4
    'numa_topology': fake_numa_topology_db_format,
    'hypervisor_type': 'fake-type',
    'hypervisor_version': 1,
    'hypervisor_hostname': 'fake-host',
    'disk_available_least': 256,
    'host_ip': fake_host_ip,
    'supported_instances': fake_supported_instances
}
fake_compute_with_resources = objects.ComputeNode(
    vcpus=fake_resources['vcpus'],
    memory_mb=fake_resources['memory_mb'],
    local_gb=fake_resources['local_gb'],
    cpu_info=fake_resources['cpu_info'],
    vcpus_used=fake_resources['vcpus_used'],
    memory_mb_used=fake_resources['memory_mb_used'],
    local_gb_used=fake_resources['local_gb_used'],
    numa_topology=fake_resources['numa_topology'],
    hypervisor_type=fake_resources['hypervisor_type'],
    hypervisor_version=fake_resources['hypervisor_version'],
    hypervisor_hostname=fake_resources['hypervisor_hostname'],
    disk_available_least=fake_resources['disk_available_least'],
    host_ip=netaddr.IPAddress(fake_resources['host_ip']),
    supported_hv_specs=fake_supported_hv_specs,
)


class _TestComputeNodeObject(object):
    def supported_hv_specs_comparator(self, expected, obj_val):
        obj_val = [inst.to_list() for inst in obj_val]
        self.assertJsonEqual(expected, obj_val)

    def pci_device_pools_comparator(self, expected, obj_val):
Example #5
    def test_compat_service_id_compute_host_not_found(self, mock_get):
        mock_get.side_effect = exception.ComputeHostNotFound(host='fake-host')
        compute = objects.ComputeNode(host='fake-host', service_id=None)
        primitive = compute.obj_to_primitive(target_version='1.12')
        self.assertEqual(-1, primitive['nova_object.data']['service_id'])
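For context, obj_to_primitive() used above serializes a versioned object into a plain, JSON-friendly dict (optionally back-levelled with target_version so older services can read it), and obj_from_primitive() rebuilds an object from such a dict. A minimal round-trip sketch (field values are illustrative):

from nova import objects

objects.register_all()

compute = objects.ComputeNode(hypervisor_hostname='node1', vcpus=4)
primitive = compute.obj_to_primitive()
restored = objects.ComputeNode.obj_from_primitive(primitive)
assert restored.hypervisor_hostname == 'node1'
assert restored.vcpus == 4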
Example #6
    def setUp(self):
        super(NovaManageDBIronicTest, self).setUp()
        self.commands = manage.DbCommands()
        self.context = context.RequestContext('fake-user', 'fake-project')

        self.service1 = objects.Service(context=self.context,
                                        host='fake-host1',
                                        binary='nova-compute',
                                        topic='fake-host1',
                                        report_count=1,
                                        disabled=False,
                                        disabled_reason=None,
                                        availability_zone='nova',
                                        forced_down=False)
        self.service1.create()

        self.service2 = objects.Service(context=self.context,
                                        host='fake-host2',
                                        binary='nova-compute',
                                        topic='fake-host2',
                                        report_count=1,
                                        disabled=False,
                                        disabled_reason=None,
                                        availability_zone='nova',
                                        forced_down=False)
        self.service2.create()

        self.service3 = objects.Service(context=self.context,
                                        host='fake-host3',
                                        binary='nova-compute',
                                        topic='fake-host3',
                                        report_count=1,
                                        disabled=False,
                                        disabled_reason=None,
                                        availability_zone='nova',
                                        forced_down=False)
        self.service3.create()

        self.cn1 = objects.ComputeNode(context=self.context,
                                       service_id=self.service1.id,
                                       host='fake-host1',
                                       hypervisor_type='ironic',
                                       vcpus=1,
                                       memory_mb=1024,
                                       local_gb=10,
                                       vcpus_used=1,
                                       memory_mb_used=1024,
                                       local_gb_used=10,
                                       hypervisor_version=0,
                                       hypervisor_hostname='fake-node1',
                                       cpu_info='{}')
        self.cn1.create()

        self.cn2 = objects.ComputeNode(context=self.context,
                                       service_id=self.service1.id,
                                       host='fake-host1',
                                       hypervisor_type='ironic',
                                       vcpus=1,
                                       memory_mb=1024,
                                       local_gb=10,
                                       vcpus_used=1,
                                       memory_mb_used=1024,
                                       local_gb_used=10,
                                       hypervisor_version=0,
                                       hypervisor_hostname='fake-node2',
                                       cpu_info='{}')
        self.cn2.create()

        self.cn3 = objects.ComputeNode(context=self.context,
                                       service_id=self.service2.id,
                                       host='fake-host2',
                                       hypervisor_type='ironic',
                                       vcpus=1,
                                       memory_mb=1024,
                                       local_gb=10,
                                       vcpus_used=1,
                                       memory_mb_used=1024,
                                       local_gb_used=10,
                                       hypervisor_version=0,
                                       hypervisor_hostname='fake-node3',
                                       cpu_info='{}')
        self.cn3.create()

        self.cn4 = objects.ComputeNode(context=self.context,
                                       service_id=self.service3.id,
                                       host='fake-host3',
                                       hypervisor_type='libvirt',
                                       vcpus=1,
                                       memory_mb=1024,
                                       local_gb=10,
                                       vcpus_used=1,
                                       memory_mb_used=1024,
                                       local_gb_used=10,
                                       hypervisor_version=0,
                                       hypervisor_hostname='fake-node4',
                                       cpu_info='{}')
        self.cn4.create()

        self.cn5 = objects.ComputeNode(context=self.context,
                                       service_id=self.service2.id,
                                       host='fake-host2',
                                       hypervisor_type='ironic',
                                       vcpus=1,
                                       memory_mb=1024,
                                       local_gb=10,
                                       vcpus_used=1,
                                       memory_mb_used=1024,
                                       local_gb_used=10,
                                       hypervisor_version=0,
                                       hypervisor_hostname='fake-node5',
                                       cpu_info='{}')
        self.cn5.create()

        self.insts = []
        for cn in (self.cn1, self.cn2, self.cn3, self.cn4, self.cn4, self.cn5):
            flavor = objects.Flavor(extra_specs={})
            inst = objects.Instance(context=self.context,
                                    user_id=self.context.user_id,
                                    project_id=self.context.project_id,
                                    flavor=flavor,
                                    node=cn.hypervisor_hostname)
            inst.create()
            self.insts.append(inst)

        self.ironic_insts = [
            i for i in self.insts if i.node != self.cn4.hypervisor_hostname
        ]
        self.virt_insts = [
            i for i in self.insts if i.node == self.cn4.hypervisor_hostname
        ]
Example #7
class IronicResourceTrackerTest(test.TestCase):
    """Tests the behaviour of the resource tracker with regards to the
    transitional period between adding support for custom resource classes in
    the placement API and integrating inventory and allocation records for
    Ironic baremetal nodes with those custom resource classes.
    """

    FLAVOR_FIXTURES = {
        'CUSTOM_SMALL_IRON': objects.Flavor(
            name='CUSTOM_SMALL_IRON',
            flavorid=42,
            vcpus=4,
            memory_mb=4096,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
        'CUSTOM_BIG_IRON': objects.Flavor(
            name='CUSTOM_BIG_IRON',
            flavorid=43,
            vcpus=16,
            memory_mb=65536,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
    }

    COMPUTE_NODE_FIXTURES = {
        uuids.cn1: objects.ComputeNode(
            uuid=uuids.cn1,
            hypervisor_hostname='cn1',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn2: objects.ComputeNode(
            uuid=uuids.cn2,
            hypervisor_hostname='cn2',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn3: objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='cn3',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=16,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=65536,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=2048,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
    }

    INSTANCE_FIXTURES = {
        uuids.instance1: objects.Instance(
            uuid=uuids.instance1,
            flavor=FLAVOR_FIXTURES['CUSTOM_SMALL_IRON'],
            vm_state=vm_states.BUILDING,
            task_state=task_states.SPAWNING,
            power_state=power_state.RUNNING,
            project_id='project',
            user_id=uuids.user,
        ),
    }

    def _set_client(self, client):
        """Set up embedded report clients to use the direct one from the
        interceptor.
        """
        self.report_client = client
        self.rt.reportclient = client

    def setUp(self):
        super(IronicResourceTrackerTest, self).setUp()
        self.flags(
            reserved_host_memory_mb=0,
            cpu_allocation_ratio=1.0,
            ram_allocation_ratio=1.0,
            disk_allocation_ratio=1.0,
        )

        self.ctx = context.RequestContext('user', 'project')

        driver = mock.MagicMock(autospec=virt_driver.ComputeDriver)
        driver.node_is_available.return_value = True

        def fake_upt(provider_tree, nodename, allocations=None):
            inventory = {
                'CUSTOM_SMALL_IRON': {
                    'total': 1,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 1,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            provider_tree.update_inventory(nodename, inventory)

        driver.update_provider_tree.side_effect = fake_upt
        self.driver_mock = driver
        self.rt = resource_tracker.ResourceTracker(COMPUTE_HOST, driver)
        self.instances = self.create_fixtures()

    def create_fixtures(self):
        for flavor in self.FLAVOR_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            flavor = flavor.obj_clone()
            flavor._context = self.ctx
            flavor.obj_set_defaults()
            flavor.create()

        # We create some compute node records in the Nova cell DB to simulate
        # data before adding integration for Ironic baremetal nodes with the
        # placement API...
        for cn in self.COMPUTE_NODE_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            cn = cn.obj_clone()
            cn._context = self.ctx
            cn.obj_set_defaults()
            cn.create()

        instances = {}
        for instance in self.INSTANCE_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            instance = instance.obj_clone()
            instance._context = self.ctx
            instance.obj_set_defaults()
            instance.create()
            instances[instance.uuid] = instance
        return instances

    @mock.patch('nova.compute.utils.is_volume_backed_instance',
                new=mock.Mock(return_value=False))
    @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
    def test_node_stats_isolation(self):
        """Regression test for bug 1784705 introduced in Ocata.

        The ResourceTracker.stats field is meant to track per-node stats
        so this test registers three compute nodes with a single RT where
        each node has unique stats, and then makes sure that after updating
        usage for an instance, the nodes still have their unique stats and
        nothing is leaked from node to node.
        """
        self.useFixture(func_fixtures.PlacementFixture())
        # Before the resource tracker is "initialized", we shouldn't have
        # any compute nodes or stats in the RT's cache...
        self.assertEqual(0, len(self.rt.compute_nodes))
        self.assertEqual(0, len(self.rt.stats))

        # Now "initialize" the resource tracker. This is what
        # nova.compute.manager.ComputeManager does when "initializing" the
        # nova-compute service. Do this in a predictable order so cn1 is
        # first and cn3 is last.
        for cn in sorted(self.COMPUTE_NODE_FIXTURES.values(),
                         key=lambda _cn: _cn.hypervisor_hostname):
            nodename = cn.hypervisor_hostname
            # Fake that each compute node has unique extra specs stats and
            # the RT makes sure those are unique per node.
            stats = {'node:%s' % nodename: nodename}
            self.driver_mock.get_available_resource.return_value = {
                'hypervisor_hostname': nodename,
                'hypervisor_type': 'ironic',
                'hypervisor_version': 0,
                'vcpus': cn.vcpus,
                'vcpus_used': cn.vcpus_used,
                'memory_mb': cn.memory_mb,
                'memory_mb_used': cn.memory_mb_used,
                'local_gb': cn.local_gb,
                'local_gb_used': cn.local_gb_used,
                'numa_topology': None,
                'resource_class': None,  # Act like admin hasn't set yet...
                'stats': stats,
            }
            self.rt.update_available_resource(self.ctx, nodename)

        self.assertEqual(3, len(self.rt.compute_nodes))
        self.assertEqual(3, len(self.rt.stats))

        def _assert_stats():
            # Make sure each compute node has a unique set of stats and
            # they don't accumulate across nodes.
            for _cn in self.rt.compute_nodes.values():
                node_stats_key = 'node:%s' % _cn.hypervisor_hostname
                self.assertIn(node_stats_key, _cn.stats)
                node_stat_count = 0
                for stat in _cn.stats:
                    if stat.startswith('node:'):
                        node_stat_count += 1
                self.assertEqual(1, node_stat_count, _cn.stats)
        _assert_stats()

        # Now "spawn" an instance to the first compute node by calling the
        # RT's instance_claim().
        cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
        cn1_nodename = cn1_obj.hypervisor_hostname
        inst = self.instances[uuids.instance1]
        with self.rt.instance_claim(self.ctx, inst, cn1_nodename, {}):
            _assert_stats()
Example #8
                     siblings=[],
                     pinned_cpus=set([]))
])

COMPUTE_NODES = [
    objects.ComputeNode(id=1,
                        local_gb=1024,
                        memory_mb=1024,
                        vcpus=1,
                        disk_available_least=None,
                        free_ram_mb=512,
                        vcpus_used=1,
                        free_disk_gb=512,
                        local_gb_used=0,
                        updated_at=None,
                        host='host1',
                        hypervisor_hostname='node1',
                        host_ip='127.0.0.1',
                        hypervisor_version=0,
                        numa_topology=None,
                        hypervisor_type='foo',
                        supported_hv_specs=[],
                        pci_device_pools=None,
                        cpu_info=None,
                        stats=None,
                        metrics=None),
    objects.ComputeNode(id=2,
                        local_gb=2048,
                        memory_mb=2048,
                        vcpus=2,
                        disk_available_least=1024,
Example #9
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
    objects.Service(id=2,
                    host="compute2",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
]

TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
                   for hyper_dct in TEST_HYPERS]

TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})

TEST_SERVERS = [dict(name="inst1", uuid=uuids.instance_1, host="compute1"),
                dict(name="inst2", uuid=uuids.instance_2, host="compute2"),
                dict(name="inst3", uuid=uuids.instance_3, host="compute1"),
                dict(name="inst4", uuid=uuids.instance_4, host="compute2")]


def fake_compute_node_get_all(context, limit=None, marker=None):
    if marker in ['99999']:
        raise exception.MarkerNotFound(marker)
    marker_found = True if marker is None else False
Example #10
    def _create_instance_data(self):
        """Creates an instance record and associated data like BDMs, VIFs,
        migrations, etc in the source cell and returns the Instance object.

        The idea is to create as many things from the
        Instance.INSTANCE_OPTIONAL_ATTRS list as possible.

        :returns: The created Instance and Migration objects
        """
        # Create the nova-compute services record first.
        fake_service = test_service._fake_service()
        fake_service.pop('version', None)  # version field is immutable
        fake_service.pop('id', None)  # cannot create with an id set
        service = objects.Service(self.source_context, **fake_service)
        service.create()
        # Create the compute node using the service.
        fake_compute_node = copy.copy(test_compute_node.fake_compute_node)
        fake_compute_node['host'] = service.host
        fake_compute_node['hypervisor_hostname'] = service.host
        fake_compute_node['stats'] = {}  # the object requires a dict
        fake_compute_node['service_id'] = service.id
        fake_compute_node.pop('id', None)  # cannot create with an id set
        compute_node = objects.ComputeNode(self.source_context,
                                           **fake_compute_node)
        compute_node.create()

        # Build an Instance object with basic fields set.
        updates = {
            'metadata': {
                'foo': 'bar'
            },
            'system_metadata': {
                'roles': ['member']
            },
            'host': compute_node.host,
            'node': compute_node.hypervisor_hostname
        }
        inst = fake_instance.fake_instance_obj(self.source_context, **updates)
        delattr(inst, 'id')  # cannot create an instance with an id set
        # Now we have to dirty all of the fields because fake_instance_obj
        # uses Instance._from_db_object to create the Instance object we have
        # but _from_db_object calls obj_reset_changes() which resets all of
        # the fields that were on the object, including the basic stuff like
        # the 'host' field, which means those fields don't get set in the DB.
        # TODO(mriedem): This should live in fake_instance_obj with a
        # make_creatable kwarg.
        for field in inst.obj_fields:
            if field in inst:
                setattr(inst, field, getattr(inst, field))
        # Make sure at least one expected basic field is dirty on the Instance.
        self.assertIn('host', inst.obj_what_changed())
        # Set the optional fields on the instance before creating it.
        inst.pci_requests = objects.InstancePCIRequests(requests=[
            objects.InstancePCIRequest(
                **test_instance_pci_requests.fake_pci_requests[0])
        ])
        inst.numa_topology = objects.InstanceNUMATopology(
            cells=test_instance_numa.fake_obj_numa_topology.cells)
        inst.trusted_certs = objects.TrustedCerts(ids=[uuids.cert])
        inst.vcpu_model = test_vcpu_model.fake_vcpumodel
        inst.keypairs = objects.KeyPairList(
            objects=[objects.KeyPair(**test_keypair.fake_keypair)])
        inst.device_metadata = (
            test_instance_device_metadata.get_fake_obj_device_metadata(
                self.source_context))
        # FIXME(mriedem): db.instance_create does not handle tags
        inst.obj_reset_changes(['tags'])
        inst.create()

        bdm = {
            'instance_uuid': inst.uuid,
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': uuids.volume_id,
            'volume_size': 1,
            'device_name': '/dev/vda',
        }
        bdm = objects.BlockDeviceMapping(
            self.source_context,
            **fake_block_device.FakeDbBlockDeviceDict(bdm_dict=bdm))
        delattr(bdm, 'id')  # cannot create a bdm with an id set
        bdm.obj_reset_changes(['id'])
        bdm.create()

        vif = objects.VirtualInterface(self.source_context,
                                       address='de:ad:be:ef:ca:fe',
                                       uuid=uuids.port,
                                       instance_uuid=inst.uuid)
        vif.create()

        info_cache = objects.InstanceInfoCache().new(self.source_context,
                                                     inst.uuid)
        info_cache.network_info = network_model.NetworkInfo(
            [network_model.VIF(id=vif.uuid, address=vif.address)])
        info_cache.save(update_cells=False)

        objects.TagList.create(self.source_context, inst.uuid, ['test'])

        try:
            raise test.TestingException('test-fault')
        except test.TestingException as fault:
            compute_utils.add_instance_fault_from_exc(self.source_context,
                                                      inst, fault)

        objects.InstanceAction().action_start(self.source_context,
                                              inst.uuid,
                                              'resize',
                                              want_result=False)
        objects.InstanceActionEvent().event_start(self.source_context,
                                                  inst.uuid,
                                                  'migrate_server',
                                                  want_result=False)

        # Create a fake migration for the cross-cell resize operation.
        migration = objects.Migration(
            self.source_context,
            **test_migration.fake_db_migration(instance_uuid=inst.uuid,
                                               cross_cell_move=True,
                                               migration_type='resize'))
        delattr(migration, 'id')  # cannot create a migration with an id set
        migration.obj_reset_changes(['id'])
        migration.create()

        # Create an old non-resize migration to make sure it is copied to the
        # target cell database properly.
        old_migration = objects.Migration(
            self.source_context,
            **test_migration.fake_db_migration(instance_uuid=inst.uuid,
                                               migration_type='live-migration',
                                               status='completed',
                                               uuid=uuids.old_migration))
        delattr(old_migration, 'id')  # cannot create a migration with an id
        old_migration.obj_reset_changes(['id'])
        old_migration.create()

        fake_pci_device = copy.copy(test_pci_device.fake_db_dev)
        fake_pci_device['extra_info'] = {}  # the object requires a dict
        fake_pci_device['compute_node_id'] = compute_node.id
        pci_device = objects.PciDevice.create(self.source_context,
                                              fake_pci_device)
        pci_device.allocate(inst)  # sets the status and instance_uuid fields
        pci_device.save()

        # Return a fresh copy of the instance from the DB with as many joined
        # fields loaded as possible.
        expected_attrs = copy.copy(instance_obj.INSTANCE_OPTIONAL_ATTRS)
        # Cannot load fault from get_by_uuid.
        expected_attrs.remove('fault')
        inst = objects.Instance.get_by_uuid(self.source_context,
                                            inst.uuid,
                                            expected_attrs=expected_attrs)
        return inst, migration
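Several steps above lean on oslo.versionedobjects change tracking: assigning a field marks it dirty, obj_what_changed() reports the dirty set, and obj_reset_changes() clears entries so a later create() or save() skips them. A minimal sketch with a ComputeNode (field values are illustrative):

from nova import objects

objects.register_all()

cn = objects.ComputeNode()
cn.host = 'host1'
cn.vcpus = 8
assert cn.obj_what_changed() == {'host', 'vcpus'}

# Dropping 'vcpus' from the dirty set means a subsequent create()/save()
# would not persist that field.
cn.obj_reset_changes(['vcpus'])
assert cn.obj_what_changed() == {'host'}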
Example #11
class IronicResourceTrackerTest(test_base.SchedulerReportClientTestBase):
    """Tests the behaviour of the resource tracker with regards to the
    transitional period between adding support for custom resource classes in
    the placement API and integrating inventory and allocation records for
    Ironic baremetal nodes with those custom resource classes.
    """

    FLAVOR_FIXTURES = {
        'CUSTOM_SMALL_IRON':
        objects.Flavor(
            name='CUSTOM_SMALL_IRON',
            flavorid=42,
            vcpus=4,
            memory_mb=4096,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
        'CUSTOM_BIG_IRON':
        objects.Flavor(
            name='CUSTOM_BIG_IRON',
            flavorid=43,
            vcpus=16,
            memory_mb=65536,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
    }

    COMPUTE_NODE_FIXTURES = {
        uuids.cn1:
        objects.ComputeNode(
            uuid=uuids.cn1,
            hypervisor_hostname='cn1',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn2:
        objects.ComputeNode(
            uuid=uuids.cn2,
            hypervisor_hostname='cn2',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn3:
        objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='cn3',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=16,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=65536,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=2048,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
    }

    INSTANCE_FIXTURES = {
        uuids.instance1:
        objects.Instance(
            uuid=uuids.instance1,
            flavor=FLAVOR_FIXTURES['CUSTOM_SMALL_IRON'],
            vm_state=vm_states.BUILDING,
            task_state=task_states.SPAWNING,
            power_state=power_state.RUNNING,
            project_id='project',
            user_id=uuids.user,
        ),
    }

    def _set_client(self, client):
        """Set up embedded report clients to use the direct one from the
        interceptor.
        """
        self.report_client = client
        self.rt.scheduler_client.reportclient = client
        self.rt.reportclient = client

    def setUp(self):
        super(IronicResourceTrackerTest, self).setUp()
        self.flags(
            reserved_host_memory_mb=0,
            cpu_allocation_ratio=1.0,
            ram_allocation_ratio=1.0,
            disk_allocation_ratio=1.0,
        )

        self.ctx = context.RequestContext('user', 'project')

        driver = mock.MagicMock(autospec=virt_driver.ComputeDriver)
        driver.node_is_available.return_value = True
        driver.update_provider_tree.side_effect = NotImplementedError
        self.driver_mock = driver
        self.rt = resource_tracker.ResourceTracker(COMPUTE_HOST, driver)
        self.instances = self.create_fixtures()

    def create_fixtures(self):
        for flavor in self.FLAVOR_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            flavor = flavor.obj_clone()
            flavor._context = self.ctx
            flavor.obj_set_defaults()
            flavor.create()

        # We create some compute node records in the Nova cell DB to simulate
        # data before adding integration for Ironic baremetal nodes with the
        # placement API...
        for cn in self.COMPUTE_NODE_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            cn = cn.obj_clone()
            cn._context = self.ctx
            cn.obj_set_defaults()
            cn.create()

        instances = {}
        for instance in self.INSTANCE_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            instance = instance.obj_clone()
            instance._context = self.ctx
            instance.obj_set_defaults()
            instance.create()
            instances[instance.uuid] = instance
        return instances

    def placement_get_inventory(self, rp_uuid):
        url = '/resource_providers/%s/inventories' % rp_uuid
        resp = self.report_client.get(url)
        if 200 <= resp.status_code < 300:
            return resp.json()['inventories']
        else:
            return resp.status_code

    def placement_get_allocations(self, consumer_uuid):
        url = '/allocations/%s' % consumer_uuid
        resp = self.report_client.get(url)
        if 200 <= resp.status_code < 300:
            return resp.json()['allocations']
        else:
            return resp.status_code

    def placement_get_custom_rcs(self):
        url = '/resource_classes'
        resp = self.report_client.get(url)
        if 200 <= resp.status_code < 300:
            all_rcs = resp.json()['resource_classes']
            return [
                rc['name'] for rc in all_rcs
                if rc['name'] not in fields.ResourceClass.STANDARD
            ]

    @mock.patch('nova.compute.utils.is_volume_backed_instance',
                new=mock.Mock(return_value=False))
    @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
    def test_ironic_ocata_to_pike(self):
        """Check that when going from an Ocata installation with Ironic having
        node's resource class attributes set, that we properly "auto-heal" the
        inventory and allocation records in the placement API to account for
        both the old-style VCPU/MEMORY_MB/DISK_GB resources as well as the new
        custom resource class from Ironic's node.resource_class attribute.
        """
        with self._interceptor():
            # Before the resource tracker is "initialized", we shouldn't have
            # any compute nodes in the RT's cache...
            self.assertEqual(0, len(self.rt.compute_nodes))

            # There should not be any records in the placement API since we
            # haven't yet run update_available_resource() in the RT.
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                self.assertEqual(404, self.placement_get_inventory(cn.uuid))

            for inst in self.INSTANCE_FIXTURES.keys():
                self.assertEqual({}, self.placement_get_allocations(inst))

            # Nor should there be any custom resource classes in the placement
            # API, since we haven't had an Ironic node's resource class set yet
            self.assertEqual(0, len(self.placement_get_custom_rcs()))

            # Now "initialize" the resource tracker as if the compute host is a
            # Ocata host, with Ironic virt driver, but the admin has not yet
            # added a resource_class attribute to the Ironic baremetal nodes in
            # her system.
            # NOTE(jaypipes): This is what nova.compute.manager.ComputeManager
            # does when "initializing" the service...
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                nodename = cn.hypervisor_hostname
                self.driver_mock.get_available_resource.return_value = {
                    'hypervisor_hostname': nodename,
                    'hypervisor_type': 'ironic',
                    'hypervisor_version': 0,
                    'vcpus': cn.vcpus,
                    'vcpus_used': cn.vcpus_used,
                    'memory_mb': cn.memory_mb,
                    'memory_mb_used': cn.memory_mb_used,
                    'local_gb': cn.local_gb,
                    'local_gb_used': cn.local_gb_used,
                    'numa_topology': None,
                    'resource_class': None,  # Act like admin hasn't set yet...
                }
                self.driver_mock.get_inventory.return_value = {
                    VCPU: {
                        'total': cn.vcpus,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.vcpus,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                    MEMORY_MB: {
                        'total': cn.memory_mb,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.memory_mb,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                    DISK_GB: {
                        'total': cn.local_gb,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.local_gb,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                }
                self.rt.update_available_resource(self.ctx, nodename)

            self.assertEqual(3, len(self.rt.compute_nodes))
            # A canary just to make sure the assertion below about the custom
            # resource class being added wasn't already added somehow...
            crcs = self.placement_get_custom_rcs()
            self.assertNotIn('CUSTOM_SMALL_IRON', crcs)

            # Verify that the placement API has the "old-style" resources in
            # inventory and allocations
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                inv = self.placement_get_inventory(cn.uuid)
                self.assertEqual(3, len(inv))

            # Now "spawn" an instance to the first compute node by calling the
            # RT's instance_claim().
            cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
            cn1_nodename = cn1_obj.hypervisor_hostname
            inst = self.instances[uuids.instance1]
            # Since we're pike, the scheduler would have created our
            # allocation for us. So, we can use our old update routine
            # here to mimic that before we go do the compute RT claim,
            # and then the checks below.
            self.rt.reportclient.update_instance_allocation(
                self.ctx, cn1_obj, inst, 1)
            with self.rt.instance_claim(self.ctx, inst, cn1_nodename):
                pass

            allocs = self.placement_get_allocations(inst.uuid)
            self.assertEqual(1, len(allocs))
            self.assertIn(uuids.cn1, allocs)

            resources = allocs[uuids.cn1]['resources']
            self.assertEqual(3, len(resources))
            for rc in (VCPU, MEMORY_MB, DISK_GB):
                self.assertIn(rc, resources)

            # Now we emulate the operator setting ONE of the Ironic node's
            # resource class attribute to the value of a custom resource class
            # and re-run update_available_resource(). We will expect to see the
            # inventory and allocations reset for the first compute node that
            # had an instance on it. The new inventory and allocation records
            # will be for VCPU, MEMORY_MB, DISK_GB, and also a new record for
            # the custom resource class of the Ironic node.
            self.driver_mock.get_available_resource.return_value = {
                'hypervisor_hostname': cn1_obj.hypervisor_hostname,
                'hypervisor_type': 'ironic',
                'hypervisor_version': 0,
                'vcpus': cn1_obj.vcpus,
                'vcpus_used': cn1_obj.vcpus_used,
                'memory_mb': cn1_obj.memory_mb,
                'memory_mb_used': cn1_obj.memory_mb_used,
                'local_gb': cn1_obj.local_gb,
                'local_gb_used': cn1_obj.local_gb_used,
                'numa_topology': None,
                'resource_class': 'small-iron',
            }
            self.driver_mock.get_inventory.return_value = {
                VCPU: {
                    'total': cn1_obj.vcpus,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.vcpus,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                MEMORY_MB: {
                    'total': cn1_obj.memory_mb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.memory_mb,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                DISK_GB: {
                    'total': cn1_obj.local_gb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.local_gb,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                'CUSTOM_SMALL_IRON': {
                    'total': 1,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 1,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.rt.update_available_resource(self.ctx, cn1_nodename)

            # Verify the auto-creation of the custom resource class, normalized
            # to what the placement API expects
            self.assertIn('CUSTOM_SMALL_IRON', self.placement_get_custom_rcs())

            allocs = self.placement_get_allocations(inst.uuid)
            self.assertEqual(1, len(allocs))
            self.assertIn(uuids.cn1, allocs)

            resources = allocs[uuids.cn1]['resources']
            self.assertEqual(3, len(resources))
            for rc in (VCPU, MEMORY_MB, DISK_GB):
                self.assertIn(rc, resources)

            # TODO(jaypipes): Check allocations include the CUSTOM_SMALL_IRON
            # resource class. At the moment, we do not add an allocation record
            # for the Ironic custom resource class. Once the flavor is updated
            # to store a resources:$CUSTOM_RESOURCE_CLASS=1 extra_spec key and
            # the scheduler is constructing the request_spec to actually
            # request a single amount of that custom resource class, we will
            # modify the allocation/claim to consume only the custom resource
            # class and not the VCPU, MEMORY_MB and DISK_GB.

    @mock.patch('nova.compute.utils.is_volume_backed_instance',
                new=mock.Mock(return_value=False))
    @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
    def test_node_stats_isolation(self):
        """Regression test for bug 1784705 introduced in Ocata.

        The ResourceTracker.stats field is meant to track per-node stats
        so this test registers three compute nodes with a single RT where
        each node has unique stats, and then makes sure that after updating
        usage for an instance, the nodes still have their unique stats and
        nothing is leaked from node to node.
        """
        self.useFixture(nova_fixtures.PlacementFixture())
        # Before the resource tracker is "initialized", we shouldn't have
        # any compute nodes or stats in the RT's cache...
        self.assertEqual(0, len(self.rt.compute_nodes))
        self.assertEqual(0, len(self.rt.stats))

        # Now "initialize" the resource tracker. This is what
        # nova.compute.manager.ComputeManager does when "initializing" the
        # nova-compute service. Do this in a predictable order so cn1 is
        # first and cn3 is last.
        for cn in sorted(self.COMPUTE_NODE_FIXTURES.values(),
                         key=lambda _cn: _cn.hypervisor_hostname):
            nodename = cn.hypervisor_hostname
            # Fake that each compute node has unique extra specs stats and
            # the RT makes sure those are unique per node.
            stats = {'node:%s' % nodename: nodename}
            self.driver_mock.get_available_resource.return_value = {
                'hypervisor_hostname': nodename,
                'hypervisor_type': 'ironic',
                'hypervisor_version': 0,
                'vcpus': cn.vcpus,
                'vcpus_used': cn.vcpus_used,
                'memory_mb': cn.memory_mb,
                'memory_mb_used': cn.memory_mb_used,
                'local_gb': cn.local_gb,
                'local_gb_used': cn.local_gb_used,
                'numa_topology': None,
                'resource_class': None,  # Act like admin hasn't set yet...
                'stats': stats,
            }
            self.driver_mock.get_inventory.return_value = {
                'CUSTOM_SMALL_IRON': {
                    'total': 1,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 1,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.rt.update_available_resource(self.ctx, nodename)

        self.assertEqual(3, len(self.rt.compute_nodes))
        self.assertEqual(3, len(self.rt.stats))

        def _assert_stats():
            # Make sure each compute node has a unique set of stats and
            # they don't accumulate across nodes.
            for _cn in self.rt.compute_nodes.values():
                node_stats_key = 'node:%s' % _cn.hypervisor_hostname
                self.assertIn(node_stats_key, _cn.stats)
                node_stat_count = 0
                for stat in _cn.stats:
                    if stat.startswith('node:'):
                        node_stat_count += 1
                self.assertEqual(1, node_stat_count, _cn.stats)

        _assert_stats()

        # Now "spawn" an instance to the first compute node by calling the
        # RT's instance_claim().
        cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
        cn1_nodename = cn1_obj.hypervisor_hostname
        inst = self.instances[uuids.instance1]
        with self.rt.instance_claim(self.ctx, inst, cn1_nodename):
            _assert_stats()
Example #12
import os
from nova import objects
import json
from oslo_utils import units
import build_new_host_topology

objects.register_all()

primitive_dir = os.path.join(os.curdir, 'test_primitives')
inst_primitive_files = [
    os.path.join(primitive_dir, x) for x in os.listdir(primitive_dir)
    if 'instance-primitive' in x
]
host_primitive_files = [
    os.path.join(primitive_dir, x) for x in os.listdir(primitive_dir)
    if 'host-primitive' in x
]

Inst = objects.Instance()
Host = objects.ComputeNode()

inst_primitives = [json.load(open(x)) for x in inst_primitive_files]
insts = [Inst.obj_from_primitive(x) for x in inst_primitives]

host_primitives = [json.load(open(x)) for x in host_primitive_files]
hosts = [Host.obj_from_primitive(x) for x in host_primitives]

print "insts"
print "hosts"
Example #13
#    under the License.
"""
Fake nodes for Ironic host manager tests.
"""

from nova import objects


COMPUTE_NODES = [
        objects.ComputeNode(
            id=1, local_gb=10, memory_mb=1024, vcpus=1,
            vcpus_used=0, local_gb_used=0, memory_mb_used=0,
            updated_at=None, cpu_info='baremetal cpu',
            host='host1',
            hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
            hypervisor_version=1, hypervisor_type='ironic',
            stats=dict(ironic_driver=
                       "nova.virt.ironic.driver.IronicDriver",
                       cpu_arch='i386'),
            supported_hv_specs=[objects.HVSpec.from_list(
                ["i386", "baremetal", "baremetal"])],
            free_disk_gb=10, free_ram_mb=1024,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
        objects.ComputeNode(
            id=2, local_gb=20, memory_mb=2048, vcpus=1,
            vcpus_used=0, local_gb_used=0, memory_mb_used=0,
            updated_at=None, cpu_info='baremetal cpu',
            host='host2',
            hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
            hypervisor_version=1, hypervisor_type='ironic',
            stats=dict(ironic_driver=
                       "nova.virt.ironic.driver.IronicDriver",
Example #14
0
    def test_execute_with_destination(self, new_mode=True):
        dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
        with test.nested(
                mock.patch.object(self.task, '_check_host_is_up'),
                mock.patch.object(self.task,
                                  '_check_requested_destination',
                                  return_value=(mock.sentinel.source_node,
                                                dest_node)),
                mock.patch.object(scheduler_utils,
                                  'claim_resources_on_destination'),
                mock.patch.object(self.migration, 'save'),
                mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
                mock.patch('nova.conductor.tasks.migrate.'
                           'replace_allocation_with_migration'),
                mock.patch(
                    'nova.conductor.tasks.live_migrate.'
                    'should_do_migration_allocation')) as (mock_check_up,
                                                           mock_check_dest,
                                                           mock_claim,
                                                           mock_save, mock_mig,
                                                           m_alloc, mock_sda):
            mock_mig.return_value = "bob"
            m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)
            mock_sda.return_value = new_mode

            self.assertEqual("bob", self.task.execute())
            mock_check_up.assert_called_once_with(self.instance_host)
            mock_check_dest.assert_called_once_with()
            if new_mode:
                allocs = mock.sentinel.allocs
            else:
                allocs = None
            mock_claim.assert_called_once_with(
                self.context,
                self.task.scheduler_client.reportclient,
                self.instance,
                mock.sentinel.source_node,
                dest_node,
                source_node_allocations=allocs)
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest=self.destination,
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            # make sure the source/dest fields were set on the migration object
            self.assertEqual(self.instance.node, self.migration.source_node)
            self.assertEqual(dest_node.hypervisor_hostname,
                             self.migration.dest_node)
            self.assertEqual(self.task.destination,
                             self.migration.dest_compute)
            if new_mode:
                m_alloc.assert_called_once_with(self.context, self.instance,
                                                self.migration)
            else:
                m_alloc.assert_not_called()
        # When the task is executed with a destination it means the host is
        # being forced and we don't call the scheduler, so we don't need to
        # heal the request spec.
        self.heal_reqspec_is_bfv_mock.assert_not_called()

        # When the task is executed with a destination it means the host is
        # being forced and we don't call the scheduler, so we don't need to
        # modify the request spec
        self.ensure_network_metadata_mock.assert_not_called()
Example #15
0
    def test_tree_ops(self):
        cn1 = self.compute_node1
        cns = self.compute_nodes
        pt = provider_tree.ProviderTree(cns)

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn1.hypervisor_hostname,
            cn1.uuid,
            1,
        )

        self.assertTrue(pt.exists(cn1.uuid))
        self.assertTrue(pt.exists(cn1.hypervisor_hostname))
        self.assertFalse(pt.exists(uuids.non_existing_rp))
        self.assertFalse(pt.exists('noexist'))

        numa_cell0 = pt.new_child('numa_cell0', cn1.uuid)
        numa_cell1 = pt.new_child('numa_cell1', cn1.uuid)

        self.assertEqual(numa_cell0, pt.find('numa_cell0'))
        self.assertEqual(numa_cell0, pt.find(numa_cell0.uuid))

        self.assertTrue(pt.exists(numa_cell0.uuid))
        self.assertTrue(pt.exists('numa_cell0'))

        self.assertTrue(pt.exists(numa_cell1.uuid))
        self.assertTrue(pt.exists('numa_cell1'))

        pf1_cell0 = pt.new_child('pf1_cell0', numa_cell0.uuid)
        self.assertTrue(pt.exists(pf1_cell0.uuid))
        self.assertTrue(pt.exists('pf1_cell0'))

        self.assertRaises(
            ValueError,
            pt.new_child,
            'pf1_cell0',
            uuids.non_existing_rp,
        )

        cn3 = objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='compute-node-3',
        )
        self.assertFalse(pt.exists(cn3.uuid))
        self.assertFalse(pt.exists(cn3.hypervisor_hostname))
        pt.new_root(cn3.hypervisor_hostname, cn3.uuid, 1)

        self.assertTrue(pt.exists(cn3.uuid))
        self.assertTrue(pt.exists(cn3.hypervisor_hostname))

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn3.hypervisor_hostname,
            cn3.uuid,
            1,
        )

        self.assertRaises(
            ValueError,
            pt.remove,
            uuids.non_existing_rp,
        )

        # Save the provider UUIDs, since removing will invalidate the objects
        cell0_uuid = numa_cell0.uuid
        cell1_uuid = numa_cell1.uuid
        pf1_uuid = pf1_cell0.uuid

        pt.remove(cell1_uuid)
        self.assertFalse(pt.exists(cell1_uuid))
        self.assertTrue(pt.exists(pf1_uuid))
        self.assertTrue(pt.exists(cell0_uuid))
        self.assertTrue(pt.exists(uuids.cn1))

        # Now remove the root and check that children no longer exist
        pt.remove(uuids.cn1)
        self.assertFalse(pt.exists(pf1_uuid))
        self.assertFalse(pt.exists(cell0_uuid))
        self.assertFalse(pt.exists(uuids.cn1))
Example #16
0
 def test_update_instance_allocation_delete(self, mock_delete):
     cn = objects.ComputeNode(uuid=uuids.cn)
     inst = objects.Instance(uuid=uuids.inst)
     self.client.update_instance_allocation(cn, inst, -1)
     mock_delete.assert_called_once_with('/allocations/%s' % inst.uuid)
Example #17
0
class ComputeHostAPITestCase(test.TestCase):
    def setUp(self):
        super(ComputeHostAPITestCase, self).setUp()
        self.host_api = compute.HostAPI()
        self.aggregate_api = compute_api.AggregateAPI()
        self.ctxt = context.get_admin_context()
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
        self.req = fakes.HTTPRequest.blank('')
        self.controller = services.ServiceController()
        self.useFixture(nova_fixtures.SingleCellSimple())

    def _compare_obj(self, obj, db_obj):
        test_objects.compare_obj(self,
                                 obj,
                                 db_obj,
                                 allow_missing=test_service.OPTIONAL)

    def _compare_objs(self, obj_list, db_obj_list):
        self.assertEqual(len(obj_list), len(db_obj_list),
                         "The length of two object lists are different.")
        for index, obj in enumerate(obj_list):
            self._compare_obj(obj, db_obj_list[index])

    def test_set_host_enabled(self):
        fake_notifier.NOTIFICATIONS = []

        @mock.patch.object(self.host_api.rpcapi,
                           'set_host_enabled',
                           return_value='fake-result')
        @mock.patch.object(self.host_api,
                           '_assert_host_exists',
                           return_value='fake_host')
        def _do_test(mock_assert_host_exists, mock_set_host_enabled):
            result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
                                                    'fake_enabled')
            self.assertEqual('fake-result', result)
            self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
            msg = fake_notifier.NOTIFICATIONS[0]
            self.assertEqual('HostAPI.set_enabled.start', msg.event_type)
            self.assertEqual('api.fake_host', msg.publisher_id)
            self.assertEqual('INFO', msg.priority)
            self.assertEqual('fake_enabled', msg.payload['enabled'])
            self.assertEqual('fake_host', msg.payload['host_name'])
            msg = fake_notifier.NOTIFICATIONS[1]
            self.assertEqual('HostAPI.set_enabled.end', msg.event_type)
            self.assertEqual('api.fake_host', msg.publisher_id)
            self.assertEqual('INFO', msg.priority)
            self.assertEqual('fake_enabled', msg.payload['enabled'])
            self.assertEqual('fake_host', msg.payload['host_name'])

        _do_test()

    def test_host_name_from_assert_hosts_exists(self):
        @mock.patch.object(self.host_api.rpcapi,
                           'set_host_enabled',
                           return_value='fake-result')
        @mock.patch.object(self.host_api,
                           '_assert_host_exists',
                           return_value='fake_host')
        def _do_test(mock_assert_host_exists, mock_set_host_enabled):
            result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
                                                    'fake_enabled')
            self.assertEqual('fake-result', result)

        _do_test()

    def test_get_host_uptime(self):
        @mock.patch.object(self.host_api.rpcapi,
                           'get_host_uptime',
                           return_value='fake-result')
        @mock.patch.object(self.host_api,
                           '_assert_host_exists',
                           return_value='fake_host')
        def _do_test(mock_assert_host_exists, mock_get_host_uptime):
            result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
            self.assertEqual('fake-result', result)

        _do_test()

    def test_get_host_uptime_service_down(self):
        @mock.patch.object(self.host_api.db,
                           'service_get_by_compute_host',
                           return_value=dict(test_service.fake_service, id=1))
        @mock.patch.object(self.host_api.servicegroup_api,
                           'service_is_up',
                           return_value=False)
        def _do_test(mock_service_is_up, mock_service_get_by_compute_host):
            self.assertRaises(exception.ComputeServiceUnavailable,
                              self.host_api.get_host_uptime, self.ctxt,
                              'fake_host')

        _do_test()

    def test_host_power_action(self):
        fake_notifier.NOTIFICATIONS = []

        @mock.patch.object(self.host_api.rpcapi,
                           'host_power_action',
                           return_value='fake-result')
        @mock.patch.object(self.host_api,
                           '_assert_host_exists',
                           return_value='fake_host')
        def _do_test(mock_assert_host_exists, mock_host_power_action):
            result = self.host_api.host_power_action(self.ctxt, 'fake_host',
                                                     'fake_action')
            self.assertEqual('fake-result', result)
            self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
            msg = fake_notifier.NOTIFICATIONS[0]
            self.assertEqual('HostAPI.power_action.start', msg.event_type)
            self.assertEqual('api.fake_host', msg.publisher_id)
            self.assertEqual('INFO', msg.priority)
            self.assertEqual('fake_action', msg.payload['action'])
            self.assertEqual('fake_host', msg.payload['host_name'])
            msg = fake_notifier.NOTIFICATIONS[1]
            self.assertEqual('HostAPI.power_action.end', msg.event_type)
            self.assertEqual('api.fake_host', msg.publisher_id)
            self.assertEqual('INFO', msg.priority)
            self.assertEqual('fake_action', msg.payload['action'])
            self.assertEqual('fake_host', msg.payload['host_name'])

        _do_test()

    def test_set_host_maintenance(self):
        fake_notifier.NOTIFICATIONS = []

        @mock.patch.object(self.host_api.rpcapi,
                           'host_maintenance_mode',
                           return_value='fake-result')
        @mock.patch.object(self.host_api,
                           '_assert_host_exists',
                           return_value='fake_host')
        def _do_test(mock_assert_host_exists, mock_host_maintenance_mode):
            result = self.host_api.set_host_maintenance(
                self.ctxt, 'fake_host', 'fake_mode')
            self.assertEqual('fake-result', result)
            self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
            msg = fake_notifier.NOTIFICATIONS[0]
            self.assertEqual('HostAPI.set_maintenance.start', msg.event_type)
            self.assertEqual('api.fake_host', msg.publisher_id)
            self.assertEqual('INFO', msg.priority)
            self.assertEqual('fake_host', msg.payload['host_name'])
            self.assertEqual('fake_mode', msg.payload['mode'])
            msg = fake_notifier.NOTIFICATIONS[1]
            self.assertEqual('HostAPI.set_maintenance.end', msg.event_type)
            self.assertEqual('api.fake_host', msg.publisher_id)
            self.assertEqual('INFO', msg.priority)
            self.assertEqual('fake_host', msg.payload['host_name'])
            self.assertEqual('fake_mode', msg.payload['mode'])

        _do_test()

    def test_service_get_all_cells(self):
        cells = objects.CellMappingList.get_all(self.ctxt)
        for cell in cells:
            with context.target_cell(self.ctxt, cell) as cctxt:
                objects.Service(context=cctxt,
                                binary='nova-compute',
                                host='host-%s' % cell.uuid).create()
        services = self.host_api.service_get_all(self.ctxt, all_cells=True)
        self.assertEqual(sorted(['host-%s' % cell.uuid for cell in cells]),
                         sorted([svc.host for svc in services]))

    @mock.patch('nova.context.scatter_gather_cells')
    def test_service_get_all_cells_with_failures(self, mock_sg):
        service = objects.Service(binary='nova-compute',
                                  host='host-%s' % uuids.cell1)
        mock_sg.return_value = {
            uuids.cell1: [service],
            uuids.cell2: context.raised_exception_sentinel
        }
        services = self.host_api.service_get_all(self.ctxt, all_cells=True)
        # returns the results from cell1 and ignores cell2.
        self.assertEqual(['host-%s' % uuids.cell1],
                         [svc.host for svc in services])

    def test_service_get_all_no_zones(self):
        services = [
            dict(test_service.fake_service,
                 id=1,
                 topic='compute',
                 host='host1'),
            dict(test_service.fake_service, topic='compute', host='host2')
        ]

        @mock.patch.object(self.host_api.db, 'service_get_all')
        def _do_test(mock_service_get_all):
            mock_service_get_all.return_value = services
            # Test no filters
            result = self.host_api.service_get_all(self.ctxt)
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         disabled=None)
            self._compare_objs(result, services)

            # Test no filters #2
            mock_service_get_all.reset_mock()
            result = self.host_api.service_get_all(self.ctxt, filters={})
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         disabled=None)
            self._compare_objs(result, services)

            # Test w/ filter
            mock_service_get_all.reset_mock()
            result = self.host_api.service_get_all(self.ctxt,
                                                   filters=dict(host='host2'))
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         disabled=None)
            self._compare_objs(result, [services[1]])

        _do_test()

    def test_service_get_all(self):
        services = [
            dict(test_service.fake_service, topic='compute', host='host1'),
            dict(test_service.fake_service, topic='compute', host='host2')
        ]
        exp_services = []
        for service in services:
            exp_service = {}
            exp_service.update(availability_zone='nova', **service)
            exp_services.append(exp_service)

        @mock.patch.object(self.host_api.db, 'service_get_all')
        def _do_test(mock_service_get_all):
            mock_service_get_all.return_value = services

            # Test no filters
            result = self.host_api.service_get_all(self.ctxt, set_zones=True)
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         disabled=None)
            self._compare_objs(result, exp_services)

            # Test no filters #2
            mock_service_get_all.reset_mock()
            result = self.host_api.service_get_all(self.ctxt,
                                                   filters={},
                                                   set_zones=True)
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         disabled=None)
            self._compare_objs(result, exp_services)

            # Test w/ filter
            mock_service_get_all.reset_mock()
            result = self.host_api.service_get_all(self.ctxt,
                                                   filters=dict(host='host2'),
                                                   set_zones=True)
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         disabled=None)
            self._compare_objs(result, [exp_services[1]])

            # Test w/ zone filter but no set_zones arg.
            mock_service_get_all.reset_mock()
            filters = {'availability_zone': 'nova'}
            result = self.host_api.service_get_all(self.ctxt, filters=filters)
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         disabled=None)
            self._compare_objs(result, exp_services)

        _do_test()

    def test_service_get_by_compute_host(self):
        @mock.patch.object(self.host_api.db,
                           'service_get_by_compute_host',
                           return_value=test_service.fake_service)
        def _do_test(mock_service_get_by_compute_host):
            result = self.host_api.service_get_by_compute_host(
                self.ctxt, 'fake-host')
            self.assertEqual(test_service.fake_service['id'], result.id)

        _do_test()

    def test_service_update(self):
        host_name = 'fake-host'
        binary = 'nova-compute'
        params_to_update = dict(disabled=True)
        service_id = 42
        expected_result = dict(test_service.fake_service, id=service_id)

        @mock.patch.object(self.host_api.db, 'service_get_by_host_and_binary')
        @mock.patch.object(self.host_api.db, 'service_update')
        def _do_test(mock_service_update, mock_service_get_by_host_and_binary):
            mock_service_get_by_host_and_binary.return_value = expected_result
            mock_service_update.return_value = expected_result

            result = self.host_api.service_update(self.ctxt, host_name, binary,
                                                  params_to_update)
            self._compare_obj(result, expected_result)

        _do_test()

    @mock.patch.object(objects.InstanceList,
                       'get_by_host',
                       return_value=['fake-responses'])
    def test_instance_get_all_by_host(self, mock_get):
        result = self.host_api.instance_get_all_by_host(self.ctxt, 'fake-host')
        self.assertEqual(['fake-responses'], result)

    def test_task_log_get_all(self):
        @mock.patch.object(self.host_api.db,
                           'task_log_get_all',
                           return_value='fake-response')
        def _do_test(mock_task_log_get_all):
            result = self.host_api.task_log_get_all(self.ctxt,
                                                    'fake-name',
                                                    'fake-begin',
                                                    'fake-end',
                                                    host='fake-host',
                                                    state='fake-state')
            self.assertEqual('fake-response', result)

        _do_test()

    @mock.patch.object(
        objects.CellMappingList,
        'get_all',
        return_value=objects.CellMappingList(objects=[
            objects.CellMapping(uuid=uuids.cell1_uuid,
                                transport_url='mq://fake1',
                                database_connection='db://fake1'),
            objects.CellMapping(uuid=uuids.cell2_uuid,
                                transport_url='mq://fake2',
                                database_connection='db://fake2'),
            objects.CellMapping(uuid=uuids.cell3_uuid,
                                transport_url='mq://fake3',
                                database_connection='db://fake3')
        ]))
    @mock.patch.object(
        objects.Service,
        'get_by_uuid',
        side_effect=[
            exception.ServiceNotFound(service_id=uuids.service_uuid),
            objects.Service(uuid=uuids.service_uuid)
        ])
    def test_service_get_by_id_using_uuid(self, service_get_by_uuid,
                                          cell_mappings_get_all):
        """Tests that we can lookup a service in the HostAPI using a uuid.
        There are two calls to objects.Service.get_by_uuid and the first
        raises ServiceNotFound so that we ensure we keep looping over the
        cells. We'll find the service in the second cell and break the loop
        so that we don't needlessly check in the third cell.
        """
        def _fake_set_target_cell(ctxt, cell_mapping):
            if cell_mapping:
                # These aren't really what would be set for values but let's
                # keep this simple so we can assert something is set when a
                # mapping is provided.
                ctxt.db_connection = cell_mapping.database_connection
                ctxt.mq_connection = cell_mapping.transport_url

        # We have to override the SingleCellSimple fixture.
        self.useFixture(
            fixtures.MonkeyPatch('nova.context.set_target_cell',
                                 _fake_set_target_cell))
        ctxt = context.get_admin_context()
        self.assertIsNone(ctxt.db_connection)
        self.host_api.service_get_by_id(ctxt, uuids.service_uuid)
        # We should have broken the loop over the cells and set the target cell
        # on the context.
        service_get_by_uuid.assert_has_calls(
            [mock.call(ctxt, uuids.service_uuid)] * 2)
        self.assertEqual('db://fake2', ctxt.db_connection)

    @mock.patch('nova.context.set_target_cell')
    @mock.patch('nova.compute.api.load_cells')
    @mock.patch('nova.objects.Service.get_by_id')
    def test_service_delete(self, get_by_id, load_cells, set_target):
        compute_api.CELLS = [
            objects.CellMapping(),
            objects.CellMapping(),
            objects.CellMapping(),
        ]

        service = mock.MagicMock()
        get_by_id.side_effect = [
            exception.ServiceNotFound(service_id=1), service,
            exception.ServiceNotFound(service_id=1)
        ]
        self.host_api.service_delete(self.ctxt, 1)
        get_by_id.assert_has_calls([
            mock.call(self.ctxt, 1),
            mock.call(self.ctxt, 1),
            mock.call(self.ctxt, 1)
        ])
        service.destroy.assert_called_once_with()
        set_target.assert_called_once_with(self.ctxt, compute_api.CELLS[1])

    @mock.patch('nova.context.set_target_cell')
    @mock.patch('nova.compute.api.load_cells')
    @mock.patch('nova.objects.Service.get_by_id')
    def test_service_delete_ambiguous(self, get_by_id, load_cells, set_target):
        compute_api.CELLS = [
            objects.CellMapping(),
            objects.CellMapping(),
            objects.CellMapping(),
        ]

        service1 = mock.MagicMock()
        service2 = mock.MagicMock()
        get_by_id.side_effect = [
            exception.ServiceNotFound(service_id=1), service1, service2
        ]
        self.assertRaises(exception.ServiceNotUnique,
                          self.host_api.service_delete, self.ctxt, 1)
        self.assertFalse(service1.destroy.called)
        self.assertFalse(service2.destroy.called)
        self.assertFalse(set_target.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'aggregate_remove_host')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'aggregate_add_host')
    @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
    @mock.patch.object(objects.HostMapping, 'get_by_host')
    def test_service_delete_compute_in_aggregate(self, mock_hm, mock_get_cn,
                                                 mock_add_host,
                                                 mock_remove_host):
        compute = self.host_api.db.service_create(
            self.ctxt, {
                'host': 'fake-compute-host',
                'binary': 'nova-compute',
                'topic': 'compute',
                'report_count': 0
            })
        # This is needed because of lazy-loading service.compute_node
        cn = objects.ComputeNode(uuid=uuids.cn,
                                 host="fake-compute-host",
                                 hypervisor_hostname="fake-compute-host")
        mock_get_cn.return_value = [cn]
        aggregate = self.aggregate_api.create_aggregate(
            self.ctxt, 'aggregate', None)
        self.aggregate_api.add_host_to_aggregate(self.ctxt, aggregate.id,
                                                 'fake-compute-host')
        mock_add_host.assert_called_once_with(mock.ANY, aggregate.uuid,
                                              'fake-compute-host')
        self.controller.delete(self.req, compute.id)
        result = self.aggregate_api.get_aggregate(self.ctxt,
                                                  aggregate.id).hosts
        self.assertEqual([], result)
        mock_hm.return_value.destroy.assert_called_once_with()
        mock_remove_host.assert_called_once_with(mock.ANY, aggregate.uuid,
                                                 'fake-compute-host')

    @mock.patch('nova.db.api.compute_node_statistics')
    def test_compute_node_statistics(self, mock_cns):
        # Note this should only be called twice
        mock_cns.side_effect = [
            {
                'stat1': 1,
                'stat2': 4.0
            },
            {
                'stat1': 5,
                'stat2': 1.2
            },
        ]
        compute_api.CELLS = [
            objects.CellMapping(uuid=uuids.cell1),
            objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID),
            objects.CellMapping(uuid=uuids.cell2)
        ]
        stats = self.host_api.compute_node_statistics(self.ctxt)
        self.assertEqual({'stat1': 6, 'stat2': 5.2}, stats)

    @mock.patch.object(
        objects.CellMappingList,
        'get_all',
        return_value=objects.CellMappingList(objects=[
            objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID,
                                transport_url='mq://cell0',
                                database_connection='db://cell0'),
            objects.CellMapping(uuid=uuids.cell1_uuid,
                                transport_url='mq://fake1',
                                database_connection='db://fake1'),
            objects.CellMapping(uuid=uuids.cell2_uuid,
                                transport_url='mq://fake2',
                                database_connection='db://fake2')
        ]))
    @mock.patch.object(objects.ComputeNode,
                       'get_by_uuid',
                       side_effect=[
                           exception.ComputeHostNotFound(host=uuids.cn_uuid),
                           objects.ComputeNode(uuid=uuids.cn_uuid)
                       ])
    def test_compute_node_get_using_uuid(self, compute_get_by_uuid,
                                         cell_mappings_get_all):
        """Tests that we can lookup a compute node in the HostAPI using a uuid.
        """
        self.host_api.compute_node_get(self.ctxt, uuids.cn_uuid)
        # cell0 should have been skipped, and the compute node wasn't found
        # in cell1 so we checked cell2 and found it
        self.assertEqual(2, compute_get_by_uuid.call_count)
        compute_get_by_uuid.assert_has_calls(
            [mock.call(self.ctxt, uuids.cn_uuid)] * 2)

    @mock.patch.object(
        objects.CellMappingList,
        'get_all',
        return_value=objects.CellMappingList(objects=[
            objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID,
                                transport_url='mq://cell0',
                                database_connection='db://cell0'),
            objects.CellMapping(uuid=uuids.cell1_uuid,
                                transport_url='mq://fake1',
                                database_connection='db://fake1'),
            objects.CellMapping(uuid=uuids.cell2_uuid,
                                transport_url='mq://fake2',
                                database_connection='db://fake2')
        ]))
    @mock.patch.object(
        objects.ComputeNode,
        'get_by_uuid',
        side_effect=exception.ComputeHostNotFound(host=uuids.cn_uuid))
    def test_compute_node_get_not_found(self, compute_get_by_uuid,
                                        cell_mappings_get_all):
        """Tests that we can lookup a compute node in the HostAPI using a uuid
        and will fail with ComputeHostNotFound if we didn't find it in any
        cell.
        """
        self.assertRaises(exception.ComputeHostNotFound,
                          self.host_api.compute_node_get, self.ctxt,
                          uuids.cn_uuid)
        # cell0 should have been skipped, and the compute node wasn't found
        # in cell1 or cell2.
        self.assertEqual(2, compute_get_by_uuid.call_count)
        compute_get_by_uuid.assert_has_calls(
            [mock.call(self.ctxt, uuids.cn_uuid)] * 2)
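Several of the tests above exercise the same control flow: iterate the mapped cells, skip cell0, and stop at the first cell where the lookup succeeds. A self-contained sketch of that pattern (plain dicts and a lambda stand in for the cell mappings and the per-cell lookup; none of this is Nova API):

def find_in_cells(cells, lookup):
    # Try each cell in order, skipping cell0, and return the first hit.
    for cell in cells:
        if cell.get('is_cell0'):
            continue
        try:
            return lookup(cell)
        except KeyError:
            continue
    raise LookupError('not found in any cell')

cells = [{'is_cell0': True}, {'name': 'cell1'}, {'name': 'cell2', 'node': 'cn'}]
print(find_in_cells(cells, lambda cell: cell['node']))  # 'cn'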
Example #18
0
def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk):
    return objects.ComputeNode(host=host,
                               memory_mb=total_mem,
                               local_gb=total_disk,
                               free_ram_mb=free_mem,
                               free_disk_gb=free_disk)
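A hypothetical use of the helper above, just to show the shape of the objects it returns:

node_a = _create_fake_node('host-a', total_mem=4096, total_disk=100,
                           free_mem=2048, free_disk=80)
node_b = _create_fake_node('host-b', total_mem=8192, total_disk=200,
                           free_mem=1024, free_disk=150)
print(node_a.free_ram_mb > node_b.free_ram_mb)  # True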
Example #19
0
fake_compute_obj = objects.ComputeNode(
    host=_HOSTNAME,
    vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
    memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
    local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
    vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
    memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
    local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
    hypervisor_type='fake',
    hypervisor_version=0,
    hypervisor_hostname=_HOSTNAME,
    free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
                 _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
    free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
                  _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
    current_workload=0,
    running_vms=0,
    cpu_info='{}',
    disk_available_least=0,
    host_ip='1.1.1.1',
    supported_hv_specs=[
        objects.HVSpec.from_list([
            obj_fields.Architecture.I686,
            obj_fields.HVType.KVM,
            obj_fields.VMMode.HVM])
    ],
    metrics=None,
    pci_device_pools=None,
    extra_resources=None,
    stats={},
    numa_topology=None,
    cpu_allocation_ratio=16.0,
    ram_allocation_ratio=1.5,
    disk_allocation_ratio=1.0,
    )
Example #20
0
class IronicResourceTrackerTest(test.TestCase):
    """Tests the behaviour of the resource tracker with regards to the
    transitional period between adding support for custom resource classes in
    the placement API and integrating inventory and allocation records for
    Ironic baremetal nodes with those custom resource classes.
    """

    FLAVOR_FIXTURES = {
        'CUSTOM_SMALL_IRON':
        objects.Flavor(
            name='CUSTOM_SMALL_IRON',
            flavorid=42,
            vcpus=4,
            memory_mb=4096,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
        'CUSTOM_BIG_IRON':
        objects.Flavor(
            name='CUSTOM_BIG_IRON',
            flavorid=43,
            vcpus=16,
            memory_mb=65536,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
    }

    COMPUTE_NODE_FIXTURES = {
        uuids.cn1:
        objects.ComputeNode(
            uuid=uuids.cn1,
            hypervisor_hostname='cn1',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn2:
        objects.ComputeNode(
            uuid=uuids.cn2,
            hypervisor_hostname='cn2',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn3:
        objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='cn3',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=16,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=65536,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=2048,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
    }

    INSTANCE_FIXTURES = {
        uuids.instance1:
        objects.Instance(
            uuid=uuids.instance1,
            flavor=FLAVOR_FIXTURES['CUSTOM_SMALL_IRON'],
            vm_state=vm_states.BUILDING,
            task_state=task_states.SPAWNING,
            power_state=power_state.RUNNING,
            project_id='project',
            user_id=uuids.user,
        ),
    }

    def setUp(self):
        super(IronicResourceTrackerTest, self).setUp()
        self.flags(auth_strategy='noauth2', group='api')
        self.flags(
            reserved_host_memory_mb=0,
            cpu_allocation_ratio=1.0,
            ram_allocation_ratio=1.0,
            disk_allocation_ratio=1.0,
        )

        self.ctx = context.RequestContext('user', 'project')
        self.app = lambda: deploy.loadapp(CONF)
        self.report_client = test_report_client.NoAuthReportClient()

        driver = mock.MagicMock(autospec='nova.virt.driver.ComputeDriver')
        driver.node_is_available.return_value = True
        self.driver_mock = driver
        self.rt = resource_tracker.ResourceTracker(COMPUTE_HOST, driver)
        self.rt.scheduler_client.reportclient = self.report_client
        self.rt.reportclient = self.report_client
        self.url = 'http://localhost/placement'
        self.create_fixtures()

    def create_fixtures(self):
        for flavor in self.FLAVOR_FIXTURES.values():
            flavor._context = self.ctx
            flavor.obj_set_defaults()
            flavor.create()

        # We create some compute node records in the Nova cell DB to simulate
        # data before adding integration for Ironic baremetal nodes with the
        # placement API...
        for cn in self.COMPUTE_NODE_FIXTURES.values():
            cn._context = self.ctx
            cn.obj_set_defaults()
            cn.create()

        for instance in self.INSTANCE_FIXTURES.values():
            instance._context = self.ctx
            instance.obj_set_defaults()
            instance.create()

    def placement_get_inventory(self, rp_uuid):
        url = '/resource_providers/%s/inventories' % rp_uuid
        resp = self.report_client.get(url)
        if 200 <= resp.status_code < 300:
            return resp.json()['inventories']
        else:
            return resp.status_code

    def placement_get_allocations(self, consumer_uuid):
        url = '/allocations/%s' % consumer_uuid
        resp = self.report_client.get(url)
        if 200 <= resp.status_code < 300:
            return resp.json()['allocations']
        else:
            return resp.status_code

    def placement_get_custom_rcs(self):
        url = '/resource_classes'
        resp = self.report_client.get(url)
        if 200 <= resp.status_code < 300:
            all_rcs = resp.json()['resource_classes']
            return [
                rc['name'] for rc in all_rcs
                if rc['name'] not in fields.ResourceClass.STANDARD
            ]

    @mock.patch('nova.compute.utils.is_volume_backed_instance',
                return_value=False)
    @mock.patch('nova.objects.compute_node.ComputeNode.save')
    @mock.patch('keystoneauth1.session.Session.get_auth_headers',
                return_value={'x-auth-token': 'admin'})
    @mock.patch('keystoneauth1.session.Session.get_endpoint',
                return_value='http://localhost/placement')
    def test_ironic_ocata_to_pike(self, mock_vbi, mock_endpoint, mock_auth,
                                  mock_cn):
        """Check that when going from an Ocata installation with Ironic having
        node's resource class attributes set, that we properly "auto-heal" the
        inventory and allocation records in the placement API to account for
        both the old-style VCPU/MEMORY_MB/DISK_GB resources as well as the new
        custom resource class from Ironic's node.resource_class attribute.
        """
        with interceptor.RequestsInterceptor(app=self.app, url=self.url):
            # Before the resource tracker is "initialized", we shouldn't have
            # any compute nodes in the RT's cache...
            self.assertEqual(0, len(self.rt.compute_nodes))

            # There should not be any records in the placement API since we
            # haven't yet run update_available_resource() in the RT.
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                self.assertEqual(404, self.placement_get_inventory(cn.uuid))

            for inst in self.INSTANCE_FIXTURES.keys():
                self.assertEqual({}, self.placement_get_allocations(inst))

            # Nor should there be any custom resource classes in the placement
            # API, since we haven't had an Ironic node's resource class set yet
            self.assertEqual(0, len(self.placement_get_custom_rcs()))

            # Now "initialize" the resource tracker as if the compute host is a
            # Ocata host, with Ironic virt driver, but the admin has not yet
            # added a resource_class attribute to the Ironic baremetal nodes in
            # her system.
            # NOTE(jaypipes): This is what nova.compute.manager.ComputeManager
            # does when "initializing" the service...
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                nodename = cn.hypervisor_hostname
                self.driver_mock.get_available_resource.return_value = {
                    'hypervisor_hostname': nodename,
                    'hypervisor_type': 'ironic',
                    'hypervisor_version': 0,
                    'vcpus': cn.vcpus,
                    'vcpus_used': cn.vcpus_used,
                    'memory_mb': cn.memory_mb,
                    'memory_mb_used': cn.memory_mb_used,
                    'local_gb': cn.local_gb,
                    'local_gb_used': cn.local_gb_used,
                    'numa_topology': None,
                    'resource_class': None,  # Act like admin hasn't set yet...
                }
                self.driver_mock.get_inventory.return_value = {
                    VCPU: {
                        'total': cn.vcpus,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.vcpus,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                    MEMORY_MB: {
                        'total': cn.memory_mb,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.memory_mb,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                    DISK_GB: {
                        'total': cn.local_gb,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.local_gb,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                }
                self.rt.update_available_resource(self.ctx, nodename)

            self.assertEqual(3, len(self.rt.compute_nodes))
            # A canary to make sure the custom resource class asserted on
            # below wasn't somehow added already...
            crcs = self.placement_get_custom_rcs()
            self.assertNotIn('CUSTOM_SMALL_IRON', crcs)

            # Verify that the placement API has the "old-style" resources in
            # inventory and allocations
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                inv = self.placement_get_inventory(cn.uuid)
                self.assertEqual(3, len(inv))

            # Now "spawn" an instance to the first compute node by calling the
            # RT's instance_claim().
            cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
            cn1_nodename = cn1_obj.hypervisor_hostname
            inst = self.INSTANCE_FIXTURES[uuids.instance1]
            # Since we're on Pike, the scheduler would have created the
            # allocation for us, so we can use the old update routine here to
            # mimic that before we do the compute RT claim and run the checks
            # below.
            self.rt.reportclient.update_instance_allocation(
                self.ctx, cn1_obj, inst, 1)
            with self.rt.instance_claim(self.ctx, inst, cn1_nodename):
                pass

            allocs = self.placement_get_allocations(inst.uuid)
            self.assertEqual(1, len(allocs))
            self.assertIn(uuids.cn1, allocs)

            resources = allocs[uuids.cn1]['resources']
            self.assertEqual(3, len(resources))
            for rc in (VCPU, MEMORY_MB, DISK_GB):
                self.assertIn(rc, resources)

            # Now we emulate the operator setting ONE of the Ironic node's
            # resource class attribute to the value of a custom resource class
            # and re-run update_available_resource(). We will expect to see the
            # inventory and allocations reset for the first compute node that
            # had an instance on it. The new inventory and allocation records
            # will be for VCPU, MEMORY_MB, DISK_GB, and also a new record for
            # the custom resource class of the Ironic node.
            self.driver_mock.get_available_resource.return_value = {
                'hypervisor_hostname': cn1_obj.hypervisor_hostname,
                'hypervisor_type': 'ironic',
                'hypervisor_version': 0,
                'vcpus': cn1_obj.vcpus,
                'vcpus_used': cn1_obj.vcpus_used,
                'memory_mb': cn1_obj.memory_mb,
                'memory_mb_used': cn1_obj.memory_mb_used,
                'local_gb': cn1_obj.local_gb,
                'local_gb_used': cn1_obj.local_gb_used,
                'numa_topology': None,
                'resource_class': 'small-iron',
            }
            self.driver_mock.get_inventory.return_value = {
                VCPU: {
                    'total': cn1_obj.vcpus,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.vcpus,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                MEMORY_MB: {
                    'total': cn1_obj.memory_mb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.memory_mb,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                DISK_GB: {
                    'total': cn1_obj.local_gb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.local_gb,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                'CUSTOM_SMALL_IRON': {
                    'total': 1,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 1,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.rt.update_available_resource(self.ctx, cn1_nodename)

            # Verify the auto-creation of the custom resource class, normalized
            # to what the placement API expects
            self.assertIn('CUSTOM_SMALL_IRON', self.placement_get_custom_rcs())

            allocs = self.placement_get_allocations(inst.uuid)
            self.assertEqual(1, len(allocs))
            self.assertIn(uuids.cn1, allocs)

            resources = allocs[uuids.cn1]['resources']
            self.assertEqual(3, len(resources))
            for rc in (VCPU, MEMORY_MB, DISK_GB):
                self.assertIn(rc, resources)
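The test above relies on Ironic's free-form node.resource_class value ('small-iron') being normalized into a placement custom resource class ('CUSTOM_SMALL_IRON'). Nova has its own helper for that normalization; the rough shape of it, shown here as an illustration only:

import re

def normalize_rc(name):
    # Illustration: uppercase, replace anything outside A-Z/0-9/_ with '_',
    # and prefix with CUSTOM_ as the placement API requires.
    return 'CUSTOM_' + re.sub(r'[^A-Z0-9_]', '_', name.upper())

assert normalize_rc('small-iron') == 'CUSTOM_SMALL_IRON'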
Example #21
0
from nova.api.openstack.compute import pci
from nova.api.openstack import wsgi
from nova import objects
from nova.objects import pci_device_pool
from nova.pci import device
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device

pci_stats = [{
    "count": 3,
    "vendor_id": "8086",
    "product_id": "1520",
    "extra_info": {
        "phys_function": '[["0x0000", "0x04", '
        '"0x00", "0x1"]]'
    }
}]
fake_compute_node = objects.ComputeNode(
    pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))


class FakeResponse(wsgi.ResponseObject):
    pass


class PciServerControllerTestV21(test.NoDBTestCase):
    def setUp(self):
        super(PciServerControllerTestV21, self).setUp()
        self.controller = pci.PciServerController()
        self.fake_obj = {
            'server': {
                'addresses': {},
                'id': 'fb08',
                'name': 'a3',
Example #22
0
 def test_create_ironic_node_state(self, init_mock):
     init_mock.return_value = None
     compute = objects.ComputeNode(**{'hypervisor_type': 'ironic'})
     host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
                                                   compute=compute)
     self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
Example #23
0
 def test_compat_service_id(self, mock_get):
     mock_get.return_value = objects.Service(id=1)
     compute = objects.ComputeNode(host='fake-host', service_id=None)
     primitive = compute.obj_to_primitive(target_version='1.12')
     self.assertEqual(1, primitive['nova_object.data']['service_id'])
Example #24
0
 def test_create_non_ironic_host_state(self, init_mock):
     init_mock.return_value = None
     compute = objects.ComputeNode(**{'cpu_info': 'other cpu'})
     host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
                                                   compute=compute)
     self.assertIs(host_manager.HostState, type(host_state))
Example #25
0
 def test_check_instance_has_no_numa_passes_no_numa(self, mock_get):
     self.flags(enable_numa_live_migration=False, group='workarounds')
     self.task.instance.numa_topology = None
     mock_get.return_value = objects.ComputeNode(
         uuid=uuids.cn1, hypervisor_type='qemu')
     self.task._check_instance_has_no_numa()
Example #26
0
    def test_tree_ops(self):
        cn1 = self.compute_node1
        cn2 = self.compute_node2
        pt = self._pt_with_cns()

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn1.hypervisor_hostname,
            cn1.uuid,
        )

        self.assertTrue(pt.exists(cn1.uuid))
        self.assertTrue(pt.exists(cn1.hypervisor_hostname))
        self.assertFalse(pt.exists(uuids.non_existing_rp))
        self.assertFalse(pt.exists('noexist'))

        self.assertEqual([cn1.uuid],
                         pt.get_provider_uuids(name_or_uuid=cn1.uuid))
        # Same with ..._in_tree
        self.assertEqual([cn1.uuid], pt.get_provider_uuids_in_tree(cn1.uuid))
        self.assertEqual(set([cn1.uuid, cn2.uuid]),
                         set(pt.get_provider_uuids()))

        numa_cell0_uuid = pt.new_child('numa_cell0', cn1.uuid)
        numa_cell1_uuid = pt.new_child('numa_cell1', cn1.hypervisor_hostname)

        self.assertEqual(cn1.uuid, pt.data(numa_cell1_uuid).parent_uuid)

        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists('numa_cell0'))

        self.assertTrue(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists('numa_cell1'))

        pf1_cell0_uuid = pt.new_child('pf1_cell0', numa_cell0_uuid)
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists('pf1_cell0'))

        # Now we've got a 3-level tree under cn1 - check provider UUIDs again
        all_cn1 = [cn1.uuid, numa_cell0_uuid, pf1_cell0_uuid, numa_cell1_uuid]
        self.assertEqual(set(all_cn1),
                         set(pt.get_provider_uuids(name_or_uuid=cn1.uuid)))
        # Same with ..._in_tree if we're asking for the root
        self.assertEqual(set(all_cn1),
                         set(pt.get_provider_uuids_in_tree(cn1.uuid)))
        # Asking for a subtree.
        self.assertEqual([numa_cell0_uuid, pf1_cell0_uuid],
                         pt.get_provider_uuids(name_or_uuid=numa_cell0_uuid))
        # With ..._in_tree, get the whole tree no matter which we specify.
        for node in all_cn1:
            self.assertEqual(set(all_cn1),
                             set(pt.get_provider_uuids_in_tree(node)))
        # With no provider specified, get everything
        self.assertEqual(
            set([
                cn1.uuid, cn2.uuid, numa_cell0_uuid, pf1_cell0_uuid,
                numa_cell1_uuid
            ]), set(pt.get_provider_uuids()))

        self.assertRaises(
            ValueError,
            pt.new_child,
            'pf1_cell0',
            uuids.non_existing_rp,
        )

        # Fail attempting to add a child that already exists in the tree
        # Existing provider is a child; search by name
        self.assertRaises(ValueError, pt.new_child, 'numa_cell0', cn1.uuid)
        # Existing provider is a root; search by UUID
        self.assertRaises(ValueError, pt.new_child, cn1.uuid, cn2.uuid)

        # Test data().
        # Root, by UUID
        cn1_snap = pt.data(cn1.uuid)
        # Fields were faithfully copied
        self.assertEqual(cn1.uuid, cn1_snap.uuid)
        self.assertEqual(cn1.hypervisor_hostname, cn1_snap.name)
        self.assertIsNone(cn1_snap.parent_uuid)
        self.assertEqual({}, cn1_snap.inventory)
        self.assertEqual(set(), cn1_snap.traits)
        self.assertEqual(set(), cn1_snap.aggregates)
        # Validate read-only-ness
        self.assertRaises(AttributeError, setattr, cn1_snap, 'name', 'foo')

        cn3 = objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='compute-node-3',
        )
        self.assertFalse(pt.exists(cn3.uuid))
        self.assertFalse(pt.exists(cn3.hypervisor_hostname))
        pt.new_root(cn3.hypervisor_hostname, cn3.uuid)

        self.assertTrue(pt.exists(cn3.uuid))
        self.assertTrue(pt.exists(cn3.hypervisor_hostname))

        self.assertRaises(
            ValueError,
            pt.new_root,
            cn3.hypervisor_hostname,
            cn3.uuid,
        )

        self.assertRaises(
            ValueError,
            pt.remove,
            uuids.non_existing_rp,
        )

        pt.remove(numa_cell1_uuid)
        self.assertFalse(pt.exists(numa_cell1_uuid))
        self.assertTrue(pt.exists(pf1_cell0_uuid))
        self.assertTrue(pt.exists(numa_cell0_uuid))
        self.assertTrue(pt.exists(uuids.cn1))

        # Now remove the root and check that children no longer exist
        pt.remove(uuids.cn1)
        self.assertFalse(pt.exists(pf1_cell0_uuid))
        self.assertFalse(pt.exists(numa_cell0_uuid))
        self.assertFalse(pt.exists(uuids.cn1))
Example #27
0
 def _create_tracker(self, fake_devs):
     self.fake_devs = fake_devs
     self.tracker = manager.PciDevTracker(
         self.fake_context, objects.ComputeNode(id=1, numa_topology=None))
Example #28
0
            l3_size=0,
            l3_granularity=0,
            l3_both_used=0,
            l3_code_used=0,
            l3_data_used=0),
    ]),
]

COMPUTE_NODES = [
        objects.ComputeNode(
            uuid=uuidsentinel.cn1,
            id=1, local_gb=1024, memory_mb=1024, vcpus=1,
            disk_available_least=None, free_ram_mb=512, vcpus_used=1,
            free_disk_gb=512, local_gb_used=0,
            updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
            host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
            hypervisor_version=0, numa_topology=None,
            hypervisor_type='foo', supported_hv_specs=[],
            pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0,
            l3_closids=16,
            l3_closids_used=1),
        objects.ComputeNode(
            uuid=uuidsentinel.cn2,
            id=2, local_gb=2048, memory_mb=2048, vcpus=2,
            disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
            free_disk_gb=1024, local_gb_used=0,
            updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
            host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1',
            hypervisor_version=0, numa_topology=None,
            hypervisor_type='foo', supported_hv_specs=[],
Example #29
0
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
    objects.Service(id=2,
                    host="compute2",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
]

TEST_HYPERS_OBJ = [
    objects.ComputeNode(**hyper_dct) for hyper_dct in TEST_HYPERS
]

TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})

TEST_SERVERS = [
    dict(name="inst1", uuid="uuid1", host="compute1"),
    dict(name="inst2", uuid="uuid2", host="compute2"),
    dict(name="inst3", uuid="uuid3", host="compute1"),
    dict(name="inst4", uuid="uuid4", host="compute2")
]


def fake_compute_node_get_all(context):
    return TEST_HYPERS_OBJ
Example #30
0
import logging

import nova.conf
from nova import config
from nova import objects
from nova import context
from nova import db
from nova.db.sqlalchemy import models

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF
argv = []
default_config_files = ['/etc/nova/nova.conf']
config.parse_args(argv, default_config_files=default_config_files)
objects.register_all()
context = context.get_admin_context()
nodes = objects.ComputeNodeList.get_all(context)
[it['deleted'] for it in nodes]

import nova.db.sqlalchemy.api as api

cxt = api.get_context_manager(context)
t = cxt.writer.using(context)
s = t.__enter__()
s.query(models.ComputeNode).first()
s.rollback()
s.close()

cn = objects.ComputeNode(context)
cn.memory_mb_used
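The manual __enter__() call above is fine for interactive poking, but the equivalent with block is the usual form; a sketch of the same query using it:

with cxt.writer.using(context) as session:
    # The enginefacade context manager commits or rolls back and cleans up
    # the session when the block exits.
    session.query(models.ComputeNode).first()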