def test_post_live_migrate_server(self, compute_node_get_all_by_host):
    # Exercise the server live-migrate API sample request.
    compute_node_get_all_by_host.return_value = objects.ComputeNodeList(
        objects=[objects.ComputeNode(host='testHost',
                                     hypervisor_hostname='host')])
    self.host_attended = None
    self._check_post_live_migrate_server(req_subs={
        'hostname': self.compute.host,
        'force': 'False',
    })
def setUp(self):
    super(TestProviderTree, self).setUp()
    # Two fake compute nodes kept both individually and as a list so
    # tests can reference either form.
    cn1 = objects.ComputeNode(uuid=uuids.cn1,
                              hypervisor_hostname='compute-node-1')
    cn2 = objects.ComputeNode(uuid=uuids.cn2,
                              hypervisor_hostname='compute-node-2')
    self.compute_node1 = cn1
    self.compute_node2 = cn2
    self.compute_nodes = objects.ComputeNodeList(objects=[cn1, cn2])
def get_compute_nodes_by_host_or_node(self, ctxt, host, node, cell=None):
    """Get compute nodes from a given host and/or node.

    Queries the enabled cells (or just ``cell`` when supplied) via
    scatter/gather and returns the matching compute nodes, or an empty
    list when nothing matches.

    :param ctxt: The request context.
    :param host: Compute host name to look up, or None.
    :param node: Hypervisor node name to look up, or None.
    :param cell: Optional cell to restrict the search to; when omitted
        and ``host`` is given, the cell is resolved from the host as an
        optimization to avoid querying every cell DB.
    :returns: A ComputeNodeList (possibly empty).
    """
    def return_empty_list_for_not_found(func):
        # Decorator: turn a NotFound from the wrapped lookup into an
        # empty ComputeNodeList so all per-cell results are uniform.
        def wrapper(*args, **kwargs):
            try:
                ret = func(*args, **kwargs)
            except exception.NotFound:
                ret = objects.ComputeNodeList()
            return ret
        return wrapper

    @return_empty_list_for_not_found
    def _get_by_host_and_node(ctxt):
        # Exact match on both host and node yields at most one node.
        compute_node = objects.ComputeNode.get_by_host_and_nodename(
            ctxt, host, node)
        return objects.ComputeNodeList(objects=[compute_node])

    @return_empty_list_for_not_found
    def _get_by_host(ctxt):
        return objects.ComputeNodeList.get_all_by_host(ctxt, host)

    @return_empty_list_for_not_found
    def _get_by_node(ctxt):
        compute_node = objects.ComputeNode.get_by_nodename(ctxt, node)
        return objects.ComputeNodeList(objects=[compute_node])

    # Pick the narrowest lookup the provided arguments allow.
    if host and node:
        target_fnc = _get_by_host_and_node
    elif host:
        target_fnc = _get_by_host
    else:
        target_fnc = _get_by_node

    if host and not cell:
        # optimization not to issue queries to every cell DB
        cell = self._get_cell_by_host(ctxt, host)

    cells = [cell] if cell else self.enabled_cells

    timeout = context_module.CELL_TIMEOUT
    nodes_by_cell = context_module.scatter_gather_cells(
        ctxt, cells, timeout, target_fnc)

    # Only one cell should have values for the compute nodes
    # so we get them here, or return an empty list if no cell
    # has a value; be sure to filter out cell failures.
    nodes = next(
        (nodes for nodes in nodes_by_cell.values()
         if nodes and not context_module.is_cell_failure_sentinel(nodes)),
        objects.ComputeNodeList())

    return nodes
def from_dict(cls, context, retry_dict):
    """Hydrate a retries object from its legacy dict form.

    :param context: The user context, used only to build the embedded
        ComputeNode objects (not persisted).
    :param retry_dict: Legacy dict with 'num_attempts' and 'hosts'
        keys; each item of retry_dict['hosts'] is a [host, node] pair.
    :returns: A populated object, or an empty one when the primitive
        is missing either required key.
    """
    # NOTE(sbauza): We are not persisting the user context since it's only
    # needed for hydrating the Retry object
    retry_obj = cls()
    # BUG FIX: the original `('num_attempts' and 'hosts') in retry_dict`
    # only tested for 'hosts', because `and` returns its last operand.
    # Both keys must be present for the primitive to be usable.
    if not all(key in retry_dict for key in ('num_attempts', 'hosts')):
        # NOTE(sbauza): We prefer to return an empty object if the
        # primitive is not good enough
        return retry_obj
    retry_obj.num_attempts = retry_dict.get('num_attempts')
    # NOTE(sbauza): each retry_dict['hosts'] item is a list of [host, node]
    computes = [objects.ComputeNode(context=context, host=host,
                                    hypervisor_hostname=node)
                for host, node in retry_dict.get('hosts')]
    retry_obj.hosts = objects.ComputeNodeList(objects=computes)
    return retry_obj
def test_aggregate_add_host_bad_placement(self, mock_pc_add_host,
                                          mock_get_all_by_host):
    hostname = 'fake-host'
    mock_get_all_by_host.return_value = objects.ComputeNodeList(
        objects=[objects.ComputeNode(host=hostname,
                                     hypervisor_hostname=hostname)])
    mock_pc_add_host.side_effect = exception.PlacementAPIConnectFailure
    aggregate = self.aggregate_api.create_aggregate(
        self.ctxt, 'aggregate', None)
    expected_uuid = aggregate.uuid
    # Adding the host must surface the placement connection failure.
    self.assertRaises(exception.PlacementAPIConnectFailure,
                      self.aggregate_api.add_host_to_aggregate,
                      self.ctxt, aggregate.id, hostname)
    mock_pc_add_host.assert_called_once_with(self.ctxt, expected_uuid,
                                             host_name=hostname)
def test_aggregate_add_host_placement_missing_provider(
        self, mock_log, mock_pc_add_host, mock_get_all_by_host):
    hostname = 'fake-host'
    mock_get_all_by_host.return_value = objects.ComputeNodeList(
        objects=[objects.ComputeNode(host=hostname,
                                     hypervisor_hostname=hostname)])
    err = exception.ResourceProviderNotFound(name_or_uuid=hostname)
    mock_pc_add_host.side_effect = err
    aggregate = self.aggregate_api.create_aggregate(
        self.ctxt, 'aggregate', None)
    self.aggregate_api.add_host_to_aggregate(self.ctxt, aggregate.id,
                                             hostname)
    # Nothing should blow up in Rocky, but we should get a warning.
    mock_log.assert_called_with(
        "Failed to associate %s with a placement "
        "aggregate: %s. This may be corrected after running "
        "nova-manage placement sync_aggregates.", hostname, err)
def test_live_migrate_instance(self, instance_save, _record, _get_spec,
                               get_all_by_host):
    # Build an ACTIVE instance in a cell and live-migrate it; the cells
    # RPC API must be invoked.
    instance = fake_instance.fake_instance_obj(
        self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
        launched_at=timeutils.utcnow(), system_metadata={},
        expected_attrs=['system_metadata'])
    get_all_by_host.return_value = objects.ComputeNodeList(objects=[
        objects.ComputeNode(host='fake_dest_host',
                            hypervisor_hostname='fake_dest_node')])
    self.compute_api.live_migrate(self.context, instance, True, True,
                                  'fake_dest_host')
    self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)
def test_to_legacy_filter_properties_dict(self):
    # A fully-populated RequestSpec must flatten into the legacy
    # filter_properties dict form.
    numa_limits = objects.NUMATopologyLimits()
    computes = objects.ComputeNodeList(objects=[
        objects.ComputeNode(host='fake1', hypervisor_hostname='node1')])
    dest = objects.Destination(host='fakehost')
    spec = objects.RequestSpec(
        ignore_hosts=['ignoredhost'],
        force_hosts=['fakehost'],
        force_nodes=['fakenode'],
        retry=objects.SchedulerRetries(num_attempts=1, hosts=computes),
        limits=objects.SchedulerLimits(numa_topology=numa_limits,
                                       vcpu=1.0,
                                       disk_gb=10.0,
                                       memory_mb=8192.0),
        instance_group=objects.InstanceGroup(hosts=['fake1'],
                                             policy='affinity',
                                             members=['inst1', 'inst2']),
        scheduler_hints={'foo': ['bar']},
        requested_destination=dest)
    expected = {
        'ignore_hosts': ['ignoredhost'],
        'force_hosts': ['fakehost'],
        'force_nodes': ['fakenode'],
        'retry': {'num_attempts': 1,
                  'hosts': [['fake1', 'node1']]},
        'limits': {'numa_topology': numa_limits,
                   'vcpu': 1.0,
                   'disk_gb': 10.0,
                   'memory_mb': 8192.0},
        'group_updated': True,
        'group_hosts': set(['fake1']),
        'group_policies': set(['affinity']),
        'group_members': set(['inst1', 'inst2']),
        'scheduler_hints': {'foo': 'bar'},
        'requested_destination': dest,
    }
    self.assertEqual(expected, spec.to_legacy_filter_properties_dict())
def test_server_evacuate(self, compute_node_get_all_by_host, rebuild_mock):
    # NOTE(wingwj): The target host can't be the same as the source one.
    req_subs = {
        'host': 'testHost',
        "adminPass": "******",
    }
    compute_node_get_all_by_host.return_value = objects.ComputeNodeList(
        objects=[objects.ComputeNode(host='testHost',
                                     hypervisor_hostname='host')])
    self._test_evacuate(req_subs, 'server-evacuate-req',
                        server_resp=None, expected_resp_code=200)
    rebuild_mock.assert_called_once_with(
        mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY,
        image_ref=mock.ANY, injected_files=mock.ANY, new_pass="******",
        orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
        on_shared_storage=None, preserve_ephemeral=mock.ANY,
        host=None, request_spec=mock.ANY)
def test_detail_compute_host_not_found(self):
    """Tests that if a service is deleted but the compute node is not
    we don't fail when listing hypervisors.
    """
    # Two computes; a matching service only exists for the first one.
    compute_nodes = objects.ComputeNodeList(objects=[
        objects.ComputeNode(**TEST_HYPERS[0]),
        objects.ComputeNode(**TEST_HYPERS[1])
    ])

    def fake_service_get_by_compute_host(context, host):
        if host == TEST_HYPERS[0]['host']:
            return TEST_SERVICES[0]
        raise exception.ComputeHostNotFound(host=host)

    @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                       return_value=compute_nodes)
    @mock.patch.object(self.controller.host_api,
                       'service_get_by_compute_host',
                       fake_service_get_by_compute_host)
    def _test(self, compute_node_get_all):
        req = self._get_request(True)
        result = self.controller.detail(req)
        # BUG FIX: the original used assertTrue(1, len(...)), which
        # always passes because 1 is truthy and the second argument is
        # only the failure message. assertEqual actually verifies that
        # the compute node without a service is filtered out.
        self.assertEqual(1, len(result['hypervisors']))
        expected = {
            'id': compute_nodes[0].id,
            'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
            'state': 'up',
            'status': 'enabled',
        }
        # we don't care about all of the details, just make sure we get
        # the subset we care about and there are more keys than what index
        # would return
        hypervisor = result['hypervisors'][0]
        self.assertTrue(
            set(expected.keys()).issubset(set(hypervisor.keys())))
        self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
        self.assertEqual(compute_nodes[0].hypervisor_hostname,
                         hypervisor['hypervisor_hostname'])
    _test(self)
def test_aggregate_remove_host_bad_placement(
        self, mock_pc_remove_host, mock_agg_obj_delete_host,
        mock_get_all_by_host):
    hostname = 'fake-host'
    mock_get_all_by_host.return_value = objects.ComputeNodeList(
        objects=[objects.ComputeNode(host=hostname,
                                     hypervisor_hostname=hostname)])
    mock_pc_remove_host.side_effect = exception.PlacementAPIConnectFailure
    aggregate = self.aggregate_api.create_aggregate(
        self.ctxt, 'aggregate', None)
    expected_uuid = aggregate.uuid
    # Removing the host must surface the placement connection failure.
    self.assertRaises(exception.PlacementAPIConnectFailure,
                      self.aggregate_api.remove_host_from_aggregate,
                      self.ctxt, aggregate.id, hostname)
    mock_pc_remove_host.assert_called_once_with(self.ctxt, expected_uuid,
                                                hostname)
    # Placement is tried first and failed with a server failure, so the
    # aggregate object must not have been touched.
    mock_agg_obj_delete_host.assert_not_called()
def test_init_instance_info(self, mock_spawn, mock_get_all,
                            mock_get_by_filters):
    # Run the "spawned" initializer synchronously.
    mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
    mock_get_all.return_value = objects.ComputeNodeList(objects=[
        objects.ComputeNode(host='host1'),
        objects.ComputeNode(host='host2')])
    mock_get_by_filters.return_value = objects.InstanceList(objects=[
        objects.Instance(host='host1', uuid='uuid1'),
        objects.Instance(host='host1', uuid='uuid2'),
        objects.Instance(host='host2', uuid='uuid3')])
    hm = self.host_manager
    hm._instance_info = {}
    hm._init_instance_info()
    self.assertEqual(2, len(hm._instance_info))
    host1_info = hm._instance_info['host1']
    self.assertIn('uuid1', host1_info['instances'])
    self.assertIn('uuid2', host1_info['instances'])
    self.assertNotIn('uuid3', host1_info['instances'])
def test_init_instance_info(self, mock_get_all, mock_get_by_filters):
    mock_get_all.return_value = objects.ComputeNodeList(objects=[
        objects.ComputeNode(host='host1'),
        objects.ComputeNode(host='host2')])
    mock_get_by_filters.return_value = objects.InstanceList(objects=[
        objects.Instance(host='host1', uuid='uuid1'),
        objects.Instance(host='host1', uuid='uuid2'),
        objects.Instance(host='host2', uuid='uuid3')])
    hm = self.host_manager
    hm._instance_info = {}
    hm._init_instance_info()
    self.assertEqual(2, len(hm._instance_info))
    host1_info = hm._instance_info['host1']
    self.assertIn('uuid1', host1_info['instances'])
    self.assertIn('uuid2', host1_info['instances'])
    self.assertNotIn('uuid3', host1_info['instances'])
    # The instance query must be scoped to the known hosts only.
    mock_get_by_filters.assert_called_once_with(
        mock.ANY, {'deleted': False, 'host': [u'host1', u'host2']})
def test_aggregate_remove_host_placement_missing_provider(
        self, mock_log, mock_pc_remove_host, mock_agg_obj_delete_host,
        mock_get_all_by_host):
    hostname = 'fake-host'
    mock_get_all_by_host.return_value = objects.ComputeNodeList(
        objects=[objects.ComputeNode(host=hostname,
                                     hypervisor_hostname=hostname)])
    err = exception.ResourceProviderNotFound(name_or_uuid=hostname)
    mock_pc_remove_host.side_effect = err
    aggregate = self.aggregate_api.create_aggregate(
        self.ctxt, 'aggregate', None)
    self.aggregate_api.remove_host_from_aggregate(
        self.ctxt, aggregate.id, hostname)
    # Nothing should blow up in Rocky, but we should get a warning.
    mock_log.assert_called_with(
        "Failed to remove association of %s with a placement "
        "aggregate: %s.", hostname, err)
    # Aggregate.delete_host is still called because the
    # ResourceProviderNotFound error is just logged.
    mock_agg_obj_delete_host.assert_called_once_with(hostname)
def test_process_use_requested_destination(self):
    # A requested destination pins the placement query to the target
    # node's provider tree and restricts the host-manager lookup to the
    # destination cell.
    cell = objects.CellMapping(uuid=uuids.cell1, name='foo')
    destination = objects.Destination(host='fake-host', node='fake-node',
                                      cell=cell)
    self.mock_host_manager.get_compute_nodes_by_host_or_node.\
        return_value = objects.ComputeNodeList(objects=[
            objects.ComputeNode(
                host='fake-host',
                uuid='12345678-1234-1234-1234-123456789012',
                hypervisor_hostname='fake-node')])
    flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=15,
                            ephemeral_gb=0, swap=0)
    fake_spec = objects.RequestSpec(flavor=flavor,
                                    requested_destination=destination)
    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 15},
        in_tree='12345678-1234-1234-1234-123456789012')
    resources = utils.resources_from_request_spec(
        self.context, fake_spec, self.mock_host_manager)
    self.assertResourceRequestsEqual(expected, resources)
    self.assertEqual(
        'in_tree=12345678-1234-1234-1234-123456789012&'
        'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1',
        resources.to_querystring())
    self.mock_host_manager.get_compute_nodes_by_host_or_node.\
        assert_called_once_with(self.context, 'fake-host', 'fake-node',
                                cell=cell)
def test_process_use_force_hosts_multinodes_found(self):
    # When the forced host maps to multiple compute nodes we cannot pin
    # the request to a single provider tree.
    self.mock_host_manager.get_compute_nodes_by_host_or_node.\
        return_value = objects.ComputeNodeList(objects=[
            objects.ComputeNode(
                host='fake-host',
                uuid='12345678-1234-1234-1234-123456789012'),
            objects.ComputeNode(
                host='fake-host',
                uuid='87654321-4321-4321-4321-210987654321')])
    flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=15,
                            ephemeral_gb=0, swap=0)
    fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 15})
    # Validate that the limit is unset
    expected._limit = None
    resources = utils.resources_from_request_spec(
        self.context, fake_spec, self.mock_host_manager)
    self.assertResourceRequestsEqual(expected, resources)
    # Validate that the limit is unset in the querystring as well
    self.assertEqual(
        'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1',
        resources.to_querystring())
    self.mock_host_manager.get_compute_nodes_by_host_or_node.\
        assert_called_once_with(self.context, 'test', None, cell=None)
def _get_by_node(ctxt):
    # Look up the single compute node by its hypervisor node name and
    # wrap it in a one-element list for a uniform return type.
    found = objects.ComputeNode.get_by_nodename(ctxt, node)
    return objects.ComputeNodeList(objects=[found])
def wrapper(*args, **kwargs):
    # Normalize NotFound from the wrapped lookup into an empty list.
    try:
        return func(*args, **kwargs)
    except exception.NotFound:
        return objects.ComputeNodeList()