def test_traits_get_destroyed_after_destroying_a_node_by_uuid(self):
    """Destroying a node via its UUID must cascade-delete its traits."""
    db_node = utils.create_test_node()
    db_trait = utils.create_test_node_trait(node_id=db_node.id)
    # Sanity check: the trait is visible while the node exists.
    self.assertTrue(self.dbapi.node_trait_exists(db_node.id, db_trait.trait))
    self.dbapi.destroy_node(db_node.uuid)
    # After destruction, trait lookups for the node raise NodeNotFound.
    self.assertRaises(exception.NodeNotFound,
                      self.dbapi.node_trait_exists,
                      db_node.id, db_trait.trait)
def test_update_node_with_traits(self):
    """Updating an unrelated node field must leave its traits intact."""
    db_node = utils.create_test_node()
    db_trait = utils.create_test_node_trait(node_id=db_node.id)
    previous_extra = db_node.extra
    replacement_extra = {'foo': 'bar'}
    # Make sure the update is a real change, not a no-op.
    self.assertNotEqual(previous_extra, replacement_extra)
    updated = self.dbapi.update_node(db_node.id,
                                     {'extra': replacement_extra})
    # The trait created above must still be attached to the node.
    self.assertEqual([db_trait.trait],
                     [t.trait for t in updated.traits])
def test_nodes_changed_after_lock(self, mock_acquire):
    """Allocation errors out when every candidate changes post-lock."""
    nodes = []
    for _ in range(5):
        nodes.append(obj_utils.create_test_node(
            self.context,
            uuid=uuidutils.generate_uuid(),
            resource_class='x-large',
            power_state='power off',
            provision_state='available'))
    for node in nodes:
        db_utils.create_test_node_trait(trait='tr1', node_id=node.id)

    # Mutate the in-memory node records so that, by the time each one
    # is locked, it no longer satisfies the allocation request.
    nodes[0].resource_class = 'x-small'       # resource class mismatch
    nodes[1].provision_state = 'deploying'    # no longer available
    nodes[2].maintenance = True               # maintenance mode is on
    nodes[3].instance_uuid = uuidutils.generate_uuid()  # already associated
    nodes[4].traits.objects[:] = []           # required trait disappeared

    mock_acquire.side_effect = [
        mock.MagicMock(**{'__enter__.return_value.node': node})
        for node in nodes
    ]
    allocation = obj_utils.create_test_allocation(
        self.context, resource_class='x-large', traits=['tr1'])
    allocations.do_allocate(self.context, allocation)
    self.assertIn('all nodes were filtered out', allocation['last_error'])
    self.assertEqual('error', allocation['state'])
    # Every failure mode above is terminal, so no node is retried.
    self.assertEqual(5, mock_acquire.call_count)
def test_nodes_changed_after_lock(self, mock_acquire):
    """Allocation must fail cleanly if all locked nodes stopped matching."""
    candidates = [
        obj_utils.create_test_node(self.context,
                                   uuid=uuidutils.generate_uuid(),
                                   resource_class='x-large',
                                   power_state='power off',
                                   provision_state='available')
        for _ in range(5)
    ]
    for candidate in candidates:
        db_utils.create_test_node_trait(trait='tr1',
                                        node_id=candidate.id)

    # Each candidate is invalidated in a different way before locking:
    # wrong resource class.
    candidates[0].resource_class = 'x-small'
    # provision state is no longer 'available'.
    candidates[1].provision_state = 'deploying'
    # node entered maintenance.
    candidates[2].maintenance = True
    # node got associated with an instance.
    candidates[3].instance_uuid = uuidutils.generate_uuid()
    # required trait was removed.
    candidates[4].traits.objects[:] = []

    mock_acquire.side_effect = [
        mock.MagicMock(**{'__enter__.return_value.node': candidate})
        for candidate in candidates
    ]
    allocation = obj_utils.create_test_allocation(
        self.context, resource_class='x-large', traits=['tr1'])
    allocations.do_allocate(self.context, allocation)
    self.assertIn('all nodes were filtered out', allocation['last_error'])
    self.assertEqual('error', allocation['state'])
    # These failures are not retried: one acquire attempt per node.
    self.assertEqual(5, mock_acquire.call_count)
def _create_test_data(self):
    """Create one of each DB resource and record their identifiers.

    Populates ``self.format_data`` with the identifiers (UUIDs,
    hostname, trait and BIOS-setting names) that the tests substitute
    into request templates.
    """
    allocated_node_id = 31
    allocation = db_utils.create_test_allocation(
        node_id=allocated_node_id, resource_class="CUSTOM_TEST")
    # Can't use the same fake-driver as other tests can pollute a
    # global method cache in the API that is in the test runner,
    # resulting in false positives.
    node = db_utils.create_test_node(
        chassis_id=None, driver='fake-driverz', owner='z')
    node_alloced = db_utils.create_test_node(
        id=allocated_node_id,
        chassis_id=None,
        allocation_id=allocation['id'],
        uuid='22e26c0b-03f2-4d2e-ae87-c02d7f33c000',
        driver='fake-driverz',
        owner='z')
    vif_port_id = "ee21d58f-5de2-4956-85ff-33935ea1ca00"
    port = db_utils.create_test_port(
        node_id=node['id'],
        internal_info={'tenant_vif_port_id': vif_port_id})
    portgroup = db_utils.create_test_portgroup(
        uuid="6eb02b44-18a3-4659-8c0b-8d2802581ae4",
        node_id=node['id'])
    chassis = db_utils.create_test_chassis(
        drivers=['fake-hardware', 'fake-driverz', 'fake-driver'])
    deploy_template = db_utils.create_test_deploy_template()
    conductor = db_utils.create_test_conductor()
    # NOTE(review): these use the allocation's own id as node_id, not
    # allocated_node_id — looks intentional for identifier lookup only,
    # but confirm against the volume target/connector tests.
    volume_target = db_utils.create_test_volume_target(
        node_id=allocation['id'])
    volume_connector = db_utils.create_test_volume_connector(
        node_id=allocation['id'])
    # Trait name aligns with create_test_node_trait.
    trait_name = 'trait'
    bios_setting_name = 'FAKE_SETTING'
    db_utils.create_test_bios_setting(
        node_id=node['id'],
        name=bios_setting_name,
        value=bios_setting_name)
    db_utils.create_test_node_trait(node_id=node['id'])
    # Dedicated node for the portgroup addition test to avoid
    # false positives with test runners.
    db_utils.create_test_node(
        uuid='18a552fb-dcd2-43bf-9302-e4c93287be11')
    self.format_data.update({
        'node_ident': node['uuid'],
        'allocated_node_ident': node_alloced['uuid'],
        'port_ident': port['uuid'],
        'portgroup_ident': portgroup['uuid'],
        'chassis_ident': chassis['uuid'],
        'deploy_template_ident': deploy_template['uuid'],
        'allocation_ident': allocation['uuid'],
        'conductor_ident': conductor['hostname'],
        'vif_ident': vif_port_id,
        'driver_name': 'fake-driverz',
        'bios_setting': bios_setting_name,
        'trait': trait_name,
        'volume_target_ident': volume_target['uuid'],
        'volume_connector_ident': volume_connector['uuid'],
    })