class IronicResourceTrackerTest(test_base.SchedulerReportClientTestBase):
    """Tests the behaviour of the resource tracker with regards to the
    transitional period between adding support for custom resource classes
    in the placement API and integrating inventory and allocation records
    for Ironic baremetal nodes with those custom resource classes.
    """

    # Two Ironic-style flavors; each is cloned per-test in create_fixtures()
    # so these class-level templates are never mutated.
    FLAVOR_FIXTURES = {
        'CUSTOM_SMALL_IRON': objects.Flavor(
            name='CUSTOM_SMALL_IRON',
            flavorid=42,
            vcpus=4,
            memory_mb=4096,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
        'CUSTOM_BIG_IRON': objects.Flavor(
            name='CUSTOM_BIG_IRON',
            flavorid=43,
            vcpus=16,
            memory_mb=65536,
            root_gb=1024,
            swap=0,
            ephemeral_gb=0,
            extra_specs={},
        ),
    }

    # Three Ironic baremetal nodes all hosted by the same nova-compute
    # service host (COMPUTE_HOST): cn1/cn2 are "small iron", cn3 is bigger.
    COMPUTE_NODE_FIXTURES = {
        uuids.cn1: objects.ComputeNode(
            uuid=uuids.cn1,
            hypervisor_hostname='cn1',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn2: objects.ComputeNode(
            uuid=uuids.cn2,
            hypervisor_hostname='cn2',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=4,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=4096,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=1024,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
        uuids.cn3: objects.ComputeNode(
            uuid=uuids.cn3,
            hypervisor_hostname='cn3',
            hypervisor_type='ironic',
            hypervisor_version=0,
            cpu_info="",
            host=COMPUTE_HOST,
            vcpus=16,
            vcpus_used=0,
            cpu_allocation_ratio=1.0,
            memory_mb=65536,
            memory_mb_used=0,
            ram_allocation_ratio=1.0,
            local_gb=2048,
            local_gb_used=0,
            disk_allocation_ratio=1.0,
        ),
    }

    # A single instance, sized to the small-iron flavor, used for the
    # instance_claim() portion of the regression test below.
    INSTANCE_FIXTURES = {
        uuids.instance1: objects.Instance(
            uuid=uuids.instance1,
            flavor=FLAVOR_FIXTURES['CUSTOM_SMALL_IRON'],
            vm_state=vm_states.BUILDING,
            task_state=task_states.SPAWNING,
            power_state=power_state.RUNNING,
            project_id='project',
            user_id=uuids.user,
        ),
    }

    def _set_client(self, client):
        """Set up embedded report clients to use the direct one from the
        interceptor.
        """
        self.report_client = client
        self.rt.scheduler_client.reportclient = client
        self.rt.reportclient = client

    def setUp(self):
        """Configure 1.0 allocation ratios, a mocked virt driver and a real
        ResourceTracker, then persist the class fixtures to the cell DB.
        """
        super(IronicResourceTrackerTest, self).setUp()
        self.flags(
            reserved_host_memory_mb=0,
            cpu_allocation_ratio=1.0,
            ram_allocation_ratio=1.0,
            disk_allocation_ratio=1.0,
        )
        self.ctx = context.RequestContext('user', 'project')

        driver = mock.MagicMock(autospec=virt_driver.ComputeDriver)
        driver.node_is_available.return_value = True
        # Force the RT down the legacy get_available_resource/get_inventory
        # path rather than the update_provider_tree path.
        driver.update_provider_tree.side_effect = NotImplementedError
        self.driver_mock = driver
        self.rt = resource_tracker.ResourceTracker(COMPUTE_HOST, driver)
        self.instances = self.create_fixtures()

    def create_fixtures(self):
        """Create DB records for the flavor, compute node and instance
        fixtures.

        :returns: dict mapping instance UUID -> created Instance object.
        """
        for flavor in self.FLAVOR_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            flavor = flavor.obj_clone()
            flavor._context = self.ctx
            flavor.obj_set_defaults()
            flavor.create()

        # We create some compute node records in the Nova cell DB to simulate
        # data before adding integration for Ironic baremetal nodes with the
        # placement API...
        for cn in self.COMPUTE_NODE_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            cn = cn.obj_clone()
            cn._context = self.ctx
            cn.obj_set_defaults()
            cn.create()

        instances = {}
        for instance in self.INSTANCE_FIXTURES.values():
            # Clone the object so the class variable isn't
            # modified by reference.
            instance = instance.obj_clone()
            instance._context = self.ctx
            instance.obj_set_defaults()
            instance.create()
            instances[instance.uuid] = instance
        return instances

    @mock.patch('nova.compute.utils.is_volume_backed_instance',
                new=mock.Mock(return_value=False))
    @mock.patch('nova.objects.compute_node.ComputeNode.save',
                new=mock.Mock())
    def test_node_stats_isolation(self):
        """Regression test for bug 1784705 introduced in Ocata.

        The ResourceTracker.stats field is meant to track per-node stats
        so this test registers three compute nodes with a single RT where
        each node has unique stats, and then makes sure that after updating
        usage for an instance, the nodes still have their unique stats and
        nothing is leaked from node to node.
        """
        self.useFixture(nova_fixtures.PlacementFixture())
        # Before the resource tracker is "initialized", we shouldn't have
        # any compute nodes or stats in the RT's cache...
        self.assertEqual(0, len(self.rt.compute_nodes))
        self.assertEqual(0, len(self.rt.stats))

        # Now "initialize" the resource tracker. This is what
        # nova.compute.manager.ComputeManager does when "initializing" the
        # nova-compute service. Do this in a predictable order so cn1 is
        # first and cn3 is last.
        for cn in sorted(self.COMPUTE_NODE_FIXTURES.values(),
                         key=lambda _cn: _cn.hypervisor_hostname):
            nodename = cn.hypervisor_hostname
            # Fake that each compute node has unique extra specs stats and
            # the RT makes sure those are unique per node.
            stats = {'node:%s' % nodename: nodename}
            self.driver_mock.get_available_resource.return_value = {
                'hypervisor_hostname': nodename,
                'hypervisor_type': 'ironic',
                'hypervisor_version': 0,
                'vcpus': cn.vcpus,
                'vcpus_used': cn.vcpus_used,
                'memory_mb': cn.memory_mb,
                'memory_mb_used': cn.memory_mb_used,
                'local_gb': cn.local_gb,
                'local_gb_used': cn.local_gb_used,
                'numa_topology': None,
                'resource_class': None,  # Act like admin hasn't set yet...
                'stats': stats,
            }
            self.driver_mock.get_inventory.return_value = {
                'CUSTOM_SMALL_IRON': {
                    'total': 1,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 1,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.rt.update_available_resource(self.ctx, nodename)

        self.assertEqual(3, len(self.rt.compute_nodes))
        self.assertEqual(3, len(self.rt.stats))

        def _assert_stats():
            # Make sure each compute node has a unique set of stats and
            # they don't accumulate across nodes.
            for _cn in self.rt.compute_nodes.values():
                node_stats_key = 'node:%s' % _cn.hypervisor_hostname
                self.assertIn(node_stats_key, _cn.stats)
                node_stat_count = 0
                for stat in _cn.stats:
                    if stat.startswith('node:'):
                        node_stat_count += 1
                self.assertEqual(1, node_stat_count, _cn.stats)
        _assert_stats()

        # Now "spawn" an instance to the first compute node by calling the
        # RT's instance_claim().
        cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
        cn1_nodename = cn1_obj.hypervisor_hostname
        inst = self.instances[uuids.instance1]
        with self.rt.instance_claim(self.ctx, inst, cn1_nodename):
            _assert_stats()
class _TestBlockDeviceMappingObject(object):
    """Shared test logic for the BlockDeviceMapping versioned object.

    Exercises save/create/destroy (with and without the cells v1 API),
    the various get_by_* lookups, the is_image/is_volume properties,
    lazy-loading of the instance field and version backlevelling.
    """

    def fake_bdm(self, instance=None):
        """Build a fake DB-layer BDM dict, optionally attached to a fake
        instance dict (which also supplies the instance_uuid).
        """
        instance = instance or {}
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': instance.get('uuid') or uuids.instance,
            'attachment_id': None,
            'device_name': '/dev/sda2',
            'source_type': 'snapshot',
            'destination_type': 'volume',
            'connection_info': "{'fake': 'connection_info'}",
            'snapshot_id': 'fake-snapshot-id-1',
            'boot_index': -1
        })
        if instance:
            fake_bdm['instance'] = instance
        return fake_bdm

    def _test_save(self, cell_type=None, update_device_name=False):
        """Common save() scenario: verify the DB update call and whether the
        cells API is notified (only expected for cell_type='compute').
        """
        if cell_type:
            self.flags(enable=True, cell_type=cell_type, group='cells')
        else:
            self.flags(enable=False, group='cells')
        # 'create' is the expected kwarg sent to the cells API: False for a
        # plain save, None when the device name changed.
        create = False
        fake_bdm = self.fake_bdm()
        with test.nested(
            mock.patch.object(db, 'block_device_mapping_update',
                              return_value=fake_bdm),
            mock.patch.object(cells_rpcapi.CellsAPI,
                              'bdm_update_or_create_at_top')
        ) as (bdm_update_mock, cells_update_mock):
            bdm_object = objects.BlockDeviceMapping(context=self.context)
            bdm_object.id = 123
            bdm_object.volume_id = 'fake_volume_id'
            if update_device_name:
                bdm_object.device_name = '/dev/vda'
                create = None
            bdm_object.save()
            if update_device_name:
                bdm_update_mock.assert_called_once_with(
                    self.context, 123,
                    {'volume_id': 'fake_volume_id',
                     'device_name': '/dev/vda'},
                    legacy=False)
            else:
                bdm_update_mock.assert_called_once_with(
                    self.context, 123,
                    {'volume_id': 'fake_volume_id'},
                    legacy=False)
            if cell_type != 'compute':
                self.assertFalse(cells_update_mock.called)
            else:
                self.assertEqual(1, cells_update_mock.call_count)
                self.assertGreater(len(cells_update_mock.call_args[0]), 1)
                self.assertIsInstance(cells_update_mock.call_args[0][1],
                                      block_device_obj.BlockDeviceMapping)
                self.assertEqual({'create': create},
                                 cells_update_mock.call_args[1])

    def test_save_nocells(self):
        self._test_save()

    def test_save_apicell(self):
        self._test_save(cell_type='api')

    def test_save_computecell(self):
        self._test_save(cell_type='compute')

    def test_save_computecell_device_name_changed(self):
        self._test_save(cell_type='compute', update_device_name=True)

    def test_save_instance_changed(self):
        # A dirty 'instance' field must make save() fail.
        bdm_object = objects.BlockDeviceMapping(context=self.context)
        bdm_object.instance = objects.Instance()
        self.assertRaises(exception.ObjectActionError, bdm_object.save)

    @mock.patch.object(db, 'block_device_mapping_update', return_value=None)
    def test_save_not_found(self, bdm_update):
        # DB layer returning None for the update means the row is gone.
        bdm_object = objects.BlockDeviceMapping(context=self.context)
        bdm_object.id = 123
        self.assertRaises(exception.BDMNotFound, bdm_object.save)

    @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
    def test_get_by_volume_id(self, get_by_vol_id):
        # NOTE(danms): Include two results to make sure the first was picked.
        # An invalid second item shouldn't be touched -- if it is, it'll
        # fail from_db_object().
        get_by_vol_id.return_value = [self.fake_bdm(), None]
        vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
            self.context, 'fake-volume-id')
        for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
            self.assertFalse(vol_bdm.obj_attr_is_set(attr))

    @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
    def test_get_by_volume_id_not_found(self, get_by_vol_id):
        get_by_vol_id.return_value = None
        self.assertRaises(exception.VolumeBDMNotFound,
                          objects.BlockDeviceMapping.get_by_volume_id,
                          self.context, 'fake-volume-id')

    @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
    def test_get_by_volume_instance_uuid_mismatch(self, get_by_vol_id):
        # Asking for the BDM scoped to a different instance must fail.
        fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'})
        get_by_vol_id.return_value = [fake_bdm_vol]
        self.assertRaises(exception.InvalidVolume,
                          objects.BlockDeviceMapping.get_by_volume_id,
                          self.context, 'fake-volume-id',
                          instance_uuid='fake-instance')

    @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
    def test_get_by_volume_id_with_expected(self, get_by_vol_id):
        get_by_vol_id.return_value = [
            self.fake_bdm(fake_instance.fake_db_instance())
        ]
        vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
            self.context, 'fake-volume-id', expected_attrs=['instance'])
        for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
            self.assertTrue(vol_bdm.obj_attr_is_set(attr))
        get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id',
                                              ['instance'])

    @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
    def test_get_by_volume_returned_single(self, get_all):
        fake_bdm_vol = self.fake_bdm()
        get_all.return_value = [fake_bdm_vol]
        vol_bdm = objects.BlockDeviceMapping.get_by_volume(
            self.context, 'fake-volume-id')
        self.assertEqual(fake_bdm_vol['id'], vol_bdm.id)

    @mock.patch.object(db, 'block_device_mapping_get_all_by_volume_id')
    def test_get_by_volume_returned_multiple(self, get_all):
        # get_by_volume() refuses ambiguity for multi-attach volumes.
        fake_bdm_vol1 = self.fake_bdm()
        fake_bdm_vol2 = self.fake_bdm()
        get_all.return_value = [fake_bdm_vol1, fake_bdm_vol2]
        self.assertRaises(exception.VolumeBDMIsMultiAttach,
                          objects.BlockDeviceMapping.get_by_volume,
                          self.context, 'fake-volume-id')

    @mock.patch.object(db,
                       'block_device_mapping_get_by_instance_and_volume_id')
    def test_get_by_instance_and_volume_id(self, mock_get):
        fake_inst = fake_instance.fake_db_instance()
        mock_get.return_value = self.fake_bdm(fake_inst)
        obj_bdm = objects.BlockDeviceMapping
        vol_bdm = obj_bdm.get_by_volume_and_instance(self.context,
                                                     'fake-volume-id',
                                                     'fake-instance-id')
        for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
            self.assertFalse(vol_bdm.obj_attr_is_set(attr))

    # NOTE(review): the duplicated "test_" prefix in this name looks like a
    # typo ("test_test_..."); the runner still discovers it, so behaviour is
    # unaffected, but it should probably be renamed.
    @mock.patch.object(db,
                       'block_device_mapping_get_by_instance_and_volume_id')
    def test_test_get_by_instance_and_volume_id_with_expected(self, mock_get):
        fake_inst = fake_instance.fake_db_instance()
        mock_get.return_value = self.fake_bdm(fake_inst)
        obj_bdm = objects.BlockDeviceMapping
        vol_bdm = obj_bdm.get_by_volume_and_instance(
            self.context, 'fake-volume-id', fake_inst['uuid'],
            expected_attrs=['instance'])
        for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
            self.assertTrue(vol_bdm.obj_attr_is_set(attr))
        mock_get.assert_called_once_with(self.context, 'fake-volume-id',
                                         fake_inst['uuid'], ['instance'])

    @mock.patch.object(db,
                       'block_device_mapping_get_by_instance_and_volume_id')
    def test_get_by_instance_and_volume_id_not_found(self, mock_get):
        mock_get.return_value = None
        obj_bdm = objects.BlockDeviceMapping
        self.assertRaises(exception.VolumeBDMNotFound,
                          obj_bdm.get_by_volume_and_instance,
                          self.context, 'fake-volume-id', 'fake-instance-id')

    def _test_create_mocked(self, cell_type=None, update_or_create=False,
                            device_name=None):
        """Common create()/update_or_create() scenario: verify the DB call
        made, the api-cell refusal, and the cells notification (sent only
        from a compute cell when a device_name is present).
        """
        if cell_type:
            self.flags(enable=True, cell_type=cell_type, group='cells')
        else:
            self.flags(enable=False, group='cells')
        values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
                  'destination_type': 'volume',
                  'instance_uuid': uuids.instance, 'attachment_id': None}
        if device_name:
            values['device_name'] = device_name
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(values)
        with test.nested(
            mock.patch.object(db, 'block_device_mapping_create',
                              return_value=fake_bdm),
            mock.patch.object(db, 'block_device_mapping_update_or_create',
                              return_value=fake_bdm),
            mock.patch.object(cells_rpcapi.CellsAPI,
                              'bdm_update_or_create_at_top')
        ) as (bdm_create_mock, bdm_update_or_create_mock, cells_update_mock):
            bdm = objects.BlockDeviceMapping(context=self.context, **values)
            if update_or_create:
                method = bdm.update_or_create
            else:
                method = bdm.create

            if cell_type == 'api':
                # BDMs must not be created directly in the api cell.
                self.assertRaises(exception.ObjectActionError,
                                  method)
            else:
                method()
                if update_or_create:
                    bdm_update_or_create_mock.assert_called_once_with(
                        self.context, values, legacy=False)
                else:
                    bdm_create_mock.assert_called_once_with(self.context,
                                                            values,
                                                            legacy=False)
                if cell_type == 'compute' and 'device_name' in values:
                    self.assertEqual(1, cells_update_mock.call_count)
                    self.assertGreater(len(cells_update_mock.call_args[0]),
                                       1)
                    self.assertEqual(self.context,
                                     cells_update_mock.call_args[0][0])
                    self.assertIsInstance(cells_update_mock.call_args[0][1],
                                          block_device_obj.BlockDeviceMapping)
                    self.assertEqual({'create': update_or_create or None},
                                     cells_update_mock.call_args[1])
                else:
                    self.assertFalse(cells_update_mock.called)

    def test_create_nocells(self):
        self._test_create_mocked()

    def test_update_or_create(self):
        self._test_create_mocked(update_or_create=True)

    def test_create_apicell(self):
        self._test_create_mocked(cell_type='api')

    def test_update_or_create_apicell(self):
        self._test_create_mocked(cell_type='api', update_or_create=True)

    def test_create_computecell(self):
        self._test_create_mocked(cell_type='compute')

    def test_update_or_create_computecell(self):
        self._test_create_mocked(cell_type='compute', update_or_create=True)

    def test_device_name_compute_cell(self):
        self._test_create_mocked(cell_type='compute',
                                 device_name='/dev/xvdb')

    def test_create(self):
        values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
                  'destination_type': 'volume',
                  'instance_uuid': uuids.instance}
        bdm = objects.BlockDeviceMapping(context=self.context, **values)
        with mock.patch.object(cells_rpcapi.CellsAPI,
                               'bdm_update_or_create_at_top'):
            bdm.create()

        for k, v in values.items():
            self.assertEqual(v, getattr(bdm, k))

    def test_create_fails(self):
        # A second create() on the same object must be rejected.
        values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
                  'destination_type': 'volume',
                  'instance_uuid': uuids.instance}
        bdm = objects.BlockDeviceMapping(context=self.context, **values)
        bdm.create()
        self.assertRaises(exception.ObjectActionError, bdm.create)

    def test_create_fails_instance(self):
        # create() must refuse a BDM carrying a loaded 'instance' field.
        values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
                  'destination_type': 'volume',
                  'instance_uuid': uuids.instance,
                  'instance': objects.Instance()}
        bdm = objects.BlockDeviceMapping(context=self.context, **values)
        self.assertRaises(exception.ObjectActionError, bdm.create)

    def _test_destroy_mocked(self, cell_type=None):
        """Common destroy() scenario: verify the DB delete and the cells
        teardown notification (only expected for cell_type='compute').
        """
        values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
                  'destination_type': 'volume', 'id': 1,
                  'instance_uuid': uuids.instance, 'device_name': 'fake'}
        if cell_type:
            self.flags(enable=True, cell_type=cell_type, group='cells')
        else:
            self.flags(enable=False, group='cells')
        with test.nested(
            mock.patch.object(db, 'block_device_mapping_destroy'),
            mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_destroy_at_top')
        ) as (bdm_del, cells_destroy):
            bdm = objects.BlockDeviceMapping(context=self.context, **values)
            bdm.destroy()
            bdm_del.assert_called_once_with(self.context, values['id'])
            if cell_type != 'compute':
                self.assertFalse(cells_destroy.called)
            else:
                cells_destroy.assert_called_once_with(
                    self.context, values['instance_uuid'],
                    device_name=values['device_name'],
                    volume_id=values['volume_id'])

    def test_destroy_nocells(self):
        self._test_destroy_mocked()

    def test_destroy_apicell(self):
        self._test_destroy_mocked(cell_type='api')

    def test_destroy_computecell(self):
        self._test_destroy_mocked(cell_type='compute')

    def test_is_image_true(self):
        bdm = objects.BlockDeviceMapping(context=self.context,
                                         source_type='image')
        self.assertTrue(bdm.is_image)

    def test_is_image_false(self):
        bdm = objects.BlockDeviceMapping(context=self.context,
                                         source_type='snapshot')
        self.assertFalse(bdm.is_image)

    def test_is_volume_true(self):
        bdm = objects.BlockDeviceMapping(context=self.context,
                                         destination_type='volume')
        self.assertTrue(bdm.is_volume)

    def test_is_volume_false(self):
        bdm = objects.BlockDeviceMapping(context=self.context,
                                         destination_type='local')
        self.assertFalse(bdm.is_volume)

    def test_obj_load_attr_not_instance(self):
        """Tests that lazy-loading something other than the instance field
        results in an error.
        """
        bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
        self.assertRaises(exception.ObjectActionError,
                          bdm.obj_load_attr, 'invalid')

    def test_obj_load_attr_orphaned(self):
        """Tests that lazy-loading the instance field on an orphaned BDM
        results in an error.
        """
        bdm = objects.BlockDeviceMapping(context=None, **self.fake_bdm())
        self.assertRaises(exception.OrphanedObjectError, bdm.obj_load_attr,
                          'instance')

    @mock.patch.object(objects.Instance, 'get_by_uuid',
                       return_value=objects.Instance(uuid=uuids.instance))
    def test_obj_load_attr_instance(self, mock_inst_get_by_uuid):
        """Tests lazy-loading the instance field."""
        bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
        self.assertEqual(mock_inst_get_by_uuid.return_value, bdm.instance)
        mock_inst_get_by_uuid.assert_called_once_with(self.context,
                                                      bdm.instance_uuid)

    def test_obj_make_compatible_pre_1_17(self):
        # Backlevelling below 1.17 must drop the 'tag' field.
        values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
                  'destination_type': 'volume',
                  'instance_uuid': uuids.instance}
        bdm = objects.BlockDeviceMapping(context=self.context, **values)
        primitive = bdm.obj_to_primitive(target_version='1.16')
        self.assertNotIn('tag', primitive)

    def test_obj_make_compatible_pre_1_18(self):
        # Backlevelling below 1.18 must drop the 'attachment_id' field.
        values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
                  'destination_type': 'volume',
                  'instance_uuid': uuids.instance,
                  'attachment_id': uuids.attachment_id}
        bdm = objects.BlockDeviceMapping(context=self.context, **values)
        primitive = bdm.obj_to_primitive(target_version='1.17')
        self.assertNotIn('attachment_id', primitive)
def test_save_instance_changed(self):
    """save() must refuse to persist a BDM whose 'instance' field is dirty."""
    bdm = objects.BlockDeviceMapping(context=self.context)
    # Assigning the instance field marks it as changed; save() is expected
    # to reject that with ObjectActionError.
    bdm.instance = objects.Instance()
    self.assertRaises(exception.ObjectActionError, bdm.save)
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Fake compute API get(): hand back a canned Instance for any lookup.

    The expected_attrs/want_objects arguments are accepted for signature
    compatibility but ignored.
    """
    stub_fields = {
        'uuid': FAKE_UUID,
        'id': instance_id,
        'instance_type_id': 1,
        'host': 'bob',
    }
    return objects.Instance(**stub_fields)
import mock from oslo_utils.fixture import uuidsentinel as uuids import six from nova.api.openstack.compute import server_external_events \ as server_external_events_v21 from nova import exception from nova import objects from nova.objects import instance as instance_obj from nova import test from nova.tests import fixtures from nova.tests.unit.api.openstack import fakes fake_instances = { '00000000-0000-0000-0000-000000000001': objects.Instance(id=1, uuid='00000000-0000-0000-0000-000000000001', host='host1'), '00000000-0000-0000-0000-000000000002': objects.Instance(id=2, uuid='00000000-0000-0000-0000-000000000002', host='host1'), '00000000-0000-0000-0000-000000000003': objects.Instance(id=3, uuid='00000000-0000-0000-0000-000000000003', host='host2'), '00000000-0000-0000-0000-000000000004': objects.Instance(id=4, uuid='00000000-0000-0000-0000-000000000004', host=None), } fake_instance_uuids = sorted(fake_instances.keys()) MISSING_UUID = '00000000-0000-0000-0000-000000000005' fake_cells = [objects.CellMapping(uuid=uuids.cell1, database_connection="db1"), objects.CellMapping(uuid=uuids.cell2, database_connection="db2")] fake_instance_mappings = [ objects.InstanceMapping(cell_mapping=fake_cells[instance.id % 2], instance_uuid=instance.uuid)
def fake_vpn_instance():
    """Build a stub Instance representing a running cloudpipe VPN guest."""
    # Fixed creation timestamp keeps the fake deterministic across tests.
    created = timeutils.parse_strtime('1981-10-20T00:00:00.000000')
    attrs = dict(
        id=7,
        image_ref=CONF.vpn_image_id,
        vm_state='active',
        created_at=created,
        uuid=uuid,
        project_id=project_id,
    )
    return objects.Instance(**attrs)