def test_process_use_force_hosts(self):
    """With force_hosts set, resources_from_request_spec() looks up the
    compute node for the forced host via the host manager and restricts
    allocation candidates to that provider tree via ``in_tree``.
    """
    # A single compute node is returned for the forced host lookup.
    fake_nodes = objects.ComputeNodeList(objects=[
        objects.ComputeNode(
            host='fake-host',
            uuid='12345678-1234-1234-1234-123456789012')
    ])
    self.mock_host_manager.get_compute_nodes_by_host_or_node.\
        return_value = fake_nodes
    flavor = objects.Flavor(vcpus=1,
                            memory_mb=1024,
                            root_gb=15,
                            ephemeral_gb=0,
                            swap=0)
    fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={
            'VCPU': 1,
            'MEMORY_MB': 1024,
            'DISK_GB': 15,
        },
        # in_tree limits candidates to the forced host's provider tree.
        in_tree='12345678-1234-1234-1234-123456789012',
    )
    resources = utils.resources_from_request_spec(
        self.context, fake_spec, self.mock_host_manager)
    self.assertResourceRequestsEqual(expected, resources)
    # The querystring carries the in_tree filter alongside the
    # default limit and the flavor-derived resources.
    expected_querystring = (
        'in_tree=12345678-1234-1234-1234-123456789012&'
        'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
    self.assertEqual(expected_querystring, resources.to_querystring())
    # The node lookup was keyed on the forced host name ('test'), with
    # no node name and no cell restriction.
    self.mock_host_manager.get_compute_nodes_by_host_or_node.\
        assert_called_once_with(self.context, 'test', None, cell=None)
def test_find_destination_with_resource_request(
        self, mock_setup, mock_reset, mock_select, mock_check, mock_call,
        mock_fill_provider_mapping, mock_update_pci_req):
    """_find_destination() must propagate the port resource request into
    the request spec, select a destination, and update the instance PCI
    requests from the provider mapping.
    """
    # Simulate a single port-based resource request group.
    resource_req = [objects.RequestGroup(requester_id=uuids.port_id)]
    self.mock_get_res_req.return_value = resource_req
    self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
    self.assertEqual(("host1", "node1", fake_limits1),
                     self.task._find_destination())
    # Make sure the request_spec was updated to include the cell
    # mapping.
    self.assertIsNotNone(self.fake_spec.requested_destination.cell)
    # Make sure the spec was updated to include the project_id.
    self.assertEqual(self.fake_spec.project_id, self.instance.project_id)
    # Make sure that requested_resources are added to the request spec
    self.assertEqual(
        resource_req, self.task.request_spec.requested_resources)
    mock_setup.assert_called_once_with(self.context, self.fake_spec)
    mock_reset.assert_called_once_with()
    self.ensure_network_metadata_mock.assert_called_once_with(
        self.instance)
    self.heal_reqspec_is_bfv_mock.assert_called_once_with(
        self.context, self.fake_spec, self.instance)
    mock_select.assert_called_once_with(self.context, self.fake_spec,
                                        [self.instance.uuid],
                                        return_objects=True,
                                        return_alternates=False)
    mock_check.assert_called_once_with('host1')
    mock_call.assert_called_once_with('host1', {uuids.port_id: []})
    mock_fill_provider_mapping.assert_called_once_with(
        self.task.request_spec, fake_selection1)
    # The (empty) instance PCI requests are updated with the provider
    # mapping for the port's request group.
    mock_update_pci_req.assert_called_once_with(
        self.context, self.task.report_client, [], {uuids.port_id: []})
def get_device_profile_groups(self, dp_name):
    """Get list of profile group objects from the device profile.

    Cyborg API returns: {"device_profiles": [<device_profile>]}
    See module notes above for further details.

    :param dp_name: string: device profile name
        Expected to be valid, not None or ''.
    :returns: [objects.RequestGroup]
    :raises: DeviceProfileError
    """
    dp_list = self._get_device_profile_list(dp_name)
    # Exactly one device profile is expected for the given name.
    if not dp_list:
        msg = _('Expected 1 device profile but got nothing.')
        raise exception.DeviceProfileError(name=dp_name, msg=msg)
    if len(dp_list) != 1:
        err = _('Expected 1 device profile but got %s.') % len(dp_list)
        raise exception.DeviceProfileError(name=dp_name, msg=err)

    request_groups = []
    for group_index, profile_group in enumerate(dp_list[0]['groups']):
        requester = get_device_profile_group_requester_id(group_index)
        request_group = objects.RequestGroup(requester_id=requester)
        for key, value in profile_group.items():
            matched = schedutils.ResourceRequest.XS_KEYPAT.match(key)
            if matched is None:
                # Keys like 'accel:foo=bar' are not resource/trait
                # specs; skip them.
                continue
            prefix, _suffix, name = matched.groups()
            if prefix == schedutils.ResourceRequest.XS_RES_PREFIX:
                request_group.add_resource(rclass=name, amount=value)
            elif prefix == schedutils.ResourceRequest.XS_TRAIT_PREFIX:
                request_group.add_trait(trait_name=name, trait_type=value)
        request_groups.append(request_group)
    return request_groups
def test_resources_from_request_spec_having_requested_resources(self):
    """Granular request groups from the spec keep their identity and
    are numbered after the unnumbered (flavor-derived) group.
    """
    test_flavor = objects.Flavor(
        vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
    first_group = objects.RequestGroup()
    second_group = objects.RequestGroup()
    spec = objects.RequestSpec(
        flavor=test_flavor,
        requested_resources=[first_group, second_group])

    request = utils.resources_from_request_spec(spec)

    # DISK_GB is root_gb + ephemeral_gb + swap (10 + 5 + 0).
    self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
                     request.get_request_group(None).resources)
    # The spec's own groups must be passed through by reference.
    self.assertIs(first_group, request.get_request_group(1))
    self.assertIs(second_group, request.get_request_group(2))
def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
    """Tests that there is no limit applied to the GET
    /allocation_candidates query string if a given scheduler hint is in
    the request spec.
    """
    spec = objects.RequestSpec(
        flavor=objects.Flavor(vcpus=1, memory_mb=1024, root_gb=15,
                              ephemeral_gb=0, swap=0),
        scheduler_hints=hints)

    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 15},
    )
    # The presence of the hint must suppress the candidate limit.
    expected._limit = None

    actual = utils.resources_from_request_spec(spec)

    self.assertResourceRequestsEqual(expected, actual)
    # Note: no 'limit=' parameter in the querystring.
    self.assertEqual(
        'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1',
        actual.to_querystring())
def test_resources_from_request_spec_with_no_disk(self):
    """A flavor with zero root/ephemeral/swap must not yield a DISK_GB
    resource in the unnumbered request group.
    """
    diskless_flavor = objects.Flavor(
        vcpus=1, memory_mb=1024, root_gb=0, ephemeral_gb=0, swap=0)

    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={'VCPU': 1, 'MEMORY_MB': 1024})

    self._test_resources_from_request_spec(expected, diskless_flavor)
def test_get_host_supporting_request_first_host_is_new(
        self, mock_get_service, mock_delete_allocation,
        mock_claim_resources):
    """If the first selected host's compute service is new enough, it is
    returned as-is with the remaining selections as alternates, and no
    allocation manipulation happens.
    """
    self.request_spec.requested_resources = [objects.RequestGroup()]
    task = self._generate_task()
    resources = {
        "resources": {
            "VCPU": 1,
            "MEMORY_MB": 1024,
            "DISK_GB": 100
        }
    }
    first = objects.Selection(
        service_host="host1",
        nodename="node1",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host1: resources}}),
        allocation_request_version='1.19')
    alternate = objects.Selection(
        service_host="host2",
        nodename="node2",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host2: resources}}),
        allocation_request_version='1.19')
    selection_list = [first, alternate]
    # Service version 39 is treated as new enough (version 38 hosts are
    # rejected in the sibling tests).
    first_service = objects.Service(service_host='host1')
    first_service.version = 39
    mock_get_service.return_value = first_service

    selected, alternates = task._get_host_supporting_request(
        selection_list)

    self.assertEqual(first, selected)
    self.assertEqual([alternate], alternates)
    # Only the first host's service needed to be checked.
    mock_get_service.assert_called_once_with(
        task.context, 'host1', 'nova-compute')
    # The first host was good and the scheduler made allocation on that
    # host. So we don't expect any resource claim manipulation
    mock_delete_allocation.assert_not_called()
    mock_claim_resources.assert_not_called()
def test_get_resources_from_request_spec_remove_flavor_amounts(self):
    """Zero-valued ``resources:`` extra specs must drop the matching
    flavor-derived amounts from the request.
    """
    zeroing_specs = {
        "resources:VCPU": 0,
        "resources:DISK_GB": 0,
    }
    flavor = objects.Flavor(
        vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
        extra_specs=zeroing_specs)

    expected = utils.ResourceRequest()
    # Only memory survives; VCPU and DISK_GB were zeroed out.
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={"MEMORY_MB": 1024})

    self._test_resources_from_request_spec(expected, flavor)
def test_get_resources_from_request_spec_custom_resource_class(self):
    """A ``resources:CUSTOM_*`` extra spec adds the custom resource
    class on top of the standard flavor-derived resources.
    """
    flavor = objects.Flavor(
        vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
        extra_specs={"resources:CUSTOM_TEST_CLASS": 1})

    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={
            "VCPU": 1,
            "MEMORY_MB": 1024,
            # DISK_GB is root_gb + ephemeral_gb (10 + 5).
            "DISK_GB": 15,
            "CUSTOM_TEST_CLASS": 1,
        })

    self._test_resources_from_request_spec(expected, flavor)
def test_resources_from_request_spec_flavor_forbidden_trait(self):
    """A ``trait:X=forbidden`` extra spec must land in forbidden_traits
    of the unnumbered request group.
    """
    flavor = objects.Flavor(
        vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
        extra_specs={'trait:CUSTOM_FLAVOR_TRAIT': 'forbidden'})

    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 15},
        forbidden_traits={'CUSTOM_FLAVOR_TRAIT'})

    self._test_resources_from_request_spec(expected, flavor)
def test_get_resources_from_request_spec_vgpu(self):
    """VGPU-related ``resources:`` extra specs are merged with the
    flavor's standard resources (ephemeral_gb is 0, so DISK_GB is 10).
    """
    vgpu_specs = {
        "resources:VGPU": 1,
        "resources:VGPU_DISPLAY_HEAD": 1,
    }
    flavor = objects.Flavor(
        vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=0, swap=0,
        extra_specs=vgpu_specs)

    expected = utils.ResourceRequest()
    expected._rg_by_id[None] = objects.RequestGroup(
        use_same_provider=False,
        resources={
            "VCPU": 1,
            "MEMORY_MB": 1024,
            "DISK_GB": 10,
            "VGPU": 1,
            "VGPU_DISPLAY_HEAD": 1,
        })

    self._test_resources_from_request_spec(expected, flavor)
def test_get_host_supporting_request_first_host_is_old_no_alternates(
        self, mock_get_service, mock_delete_allocation,
        mock_claim_resources):
    """If the only selected host is too old and no alternates remain,
    the task deletes the scheduler's allocation and raises
    MaxRetriesExceeded.
    """
    self.request_spec.requested_resources = [objects.RequestGroup()]
    task = self._generate_task()
    resources = {
        "resources": {
            "VCPU": 1,
            "MEMORY_MB": 1024,
            "DISK_GB": 100
        }
    }
    first = objects.Selection(
        service_host="host1",
        nodename="node1",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host1: resources}}),
        allocation_request_version='1.19')
    selection_list = [first]
    # Version 38 is rejected as too old for this migration.
    first_service = objects.Service(service_host='host1')
    first_service.version = 38
    mock_get_service.return_value = first_service

    self.assertRaises(
        exception.MaxRetriesExceeded, task._get_host_supporting_request,
        selection_list)

    mock_get_service.assert_called_once_with(
        task.context, 'host1', 'nova-compute')
    # The allocation made by the scheduler on the rejected host is
    # cleaned up.
    mock_delete_allocation.assert_called_once_with(
        task.context, self.instance.uuid)
    mock_claim_resources.assert_not_called()
def test_reschedule_old_computes_no_more_alternates(
        self, mock_get_service, mock_claim_resources, mock_fill_mapping,
        mock_debug):
    """If every alternate host is too old for a migration with resource
    request, _reschedule() exhausts the list without claiming anywhere
    and raises MaxRetriesExceeded.
    """
    self.request_spec.requested_resources = [objects.RequestGroup()]
    task = self._generate_task()
    resources = {
        "resources": {
            "VCPU": 1,
            "MEMORY_MB": 1024,
            "DISK_GB": 100
        }
    }
    first = objects.Selection(
        service_host="host1",
        nodename="node1",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host1: resources}}),
        allocation_request_version='1.19')
    second = objects.Selection(
        service_host="host2",
        nodename="node2",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host2: resources}}),
        allocation_request_version='1.19')
    # Both alternates run compute services that are too old (38).
    first_service = objects.Service(service_host='host1')
    first_service.version = 38
    second_service = objects.Service(service_host='host2')
    second_service.version = 38
    mock_get_service.side_effect = [first_service, second_service]

    # set up task for re-schedule
    task.host_list = [first, second]

    self.assertRaises(exception.MaxRetriesExceeded, task._reschedule)

    # Every candidate was consumed from the alternate list.
    self.assertEqual([], task.host_list)
    mock_get_service.assert_has_calls([
        mock.call(task.context, 'host1', 'nova-compute'),
        mock.call(task.context, 'host2', 'nova-compute'),
    ])
    mock_claim_resources.assert_not_called()
    mock_fill_mapping.assert_not_called()
    # Each skipped old host is logged at debug level.
    mock_debug.assert_has_calls([
        mock.call(
            'Scheduler returned alternate host %(host)s as a possible '
            'migration target for re-schedule but that host is not '
            'new enough to support the migration with resource '
            'request %(request)s. Trying another alternate.',
            {'host': 'host1',
             'request': self.request_spec.requested_resources},
            instance=self.instance),
        mock.call(
            'Scheduler returned alternate host %(host)s as a possible '
            'migration target for re-schedule but that host is not '
            'new enough to support the migration with resource '
            'request %(request)s. Trying another alternate.',
            {'host': 'host2',
             'request': self.request_spec.requested_resources},
            instance=self.instance),
    ])
def test_get_host_supporting_request_both_first_and_second_too_old(
        self, mock_get_service, mock_delete_allocation,
        mock_claim_resources, mock_debug):
    """When the first two candidate hosts are too old, the task falls
    through to the third (new enough) host, claims resources there, and
    keeps the remaining selection as an alternate.
    """
    self.request_spec.requested_resources = [objects.RequestGroup()]
    task = self._generate_task()
    resources = {
        "resources": {
            "VCPU": 1,
            "MEMORY_MB": 1024,
            "DISK_GB": 100
        }
    }
    first = objects.Selection(
        service_host="host1",
        nodename="node1",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host1: resources}}),
        allocation_request_version='1.19')
    second = objects.Selection(
        service_host="host2",
        nodename="node2",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host2: resources}}),
        allocation_request_version='1.19')
    third = objects.Selection(
        service_host="host3",
        nodename="node3",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host3: resources}}),
        allocation_request_version='1.19')
    fourth = objects.Selection(
        service_host="host4",
        nodename="node4",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps(
            {"allocations": {uuids.host4: resources}}),
        allocation_request_version='1.19')
    selection_list = [first, second, third, fourth]
    # host1 and host2 are too old (38); host3 is new enough (39).
    first_service = objects.Service(service_host='host1')
    first_service.version = 38
    second_service = objects.Service(service_host='host2')
    second_service.version = 38
    third_service = objects.Service(service_host='host3')
    third_service.version = 39
    mock_get_service.side_effect = [
        first_service, second_service, third_service
    ]
    # not called for the first and second hosts but called for the third
    mock_claim_resources.side_effect = [True]

    selected, alternates = task._get_host_supporting_request(
        selection_list)

    self.assertEqual(third, selected)
    self.assertEqual([fourth], alternates)
    # host4 never needed a service version check.
    mock_get_service.assert_has_calls([
        mock.call(task.context, 'host1', 'nova-compute'),
        mock.call(task.context, 'host2', 'nova-compute'),
        mock.call(task.context, 'host3', 'nova-compute'),
    ])
    # The scheduler's allocation on the originally selected (rejected)
    # host is removed exactly once.
    mock_delete_allocation.assert_called_once_with(
        task.context, self.instance.uuid)
    # Resources are claimed only on the finally selected third host.
    mock_claim_resources.assert_called_once_with(
        self.context, task.reportclient, task.request_spec,
        self.instance.uuid,
        {"allocations": {uuids.host3: resources}}, '1.19')
    mock_debug.assert_has_calls([
        mock.call(
            'Scheduler returned host %(host)s as a possible migration '
            'target but that host is not new enough to support the '
            'migration with resource request %(request)s or the compute '
            'RPC is pinned to less than 5.2. Trying alternate hosts.',
            {'host': 'host1',
             'request': self.request_spec.requested_resources},
            instance=self.instance),
        mock.call(
            'Scheduler returned alternate host %(host)s as a possible '
            'migration target but that host is not new enough to support '
            'the migration with resource request %(request)s or the '
            'compute RPC is pinned to less than 5.2. Trying another '
            'alternate.',
            {'host': 'host2',
             'request': self.request_spec.requested_resources},
            instance=self.instance),
    ])
def get_request_group(self, ident):
    """Return the RequestGroup for ``ident``, creating it on first use.

    A freshly created group has use_same_provider set from the
    truthiness of ``ident`` (the unnumbered group, ident=None, gets
    False).
    """
    try:
        return self._rg_by_id[ident]
    except KeyError:
        group = objects.RequestGroup(use_same_provider=bool(ident))
        self._rg_by_id[ident] = group
        return group
def test_reschedule_old_compute_skipped(self, mock_get_service,
                                        mock_claim_resources, mock_debug):
    """Re-schedule skips an alternate whose compute service is too old
    and claims resources on the next, new-enough alternate instead.
    """
    self.request_spec.requested_resources = [
        objects.RequestGroup(requester_id=uuids.port1)
    ]
    task = self._generate_task()
    resources = {
        "resources": {
            "VCPU": 1,
            "MEMORY_MB": 1024,
            "DISK_GB": 100
        }
    }
    first = objects.Selection(
        service_host="host1",
        nodename="node1",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps({
            "allocations": {uuids.host1: resources},
            "mappings": {uuids.port1: [uuids.host1]}
        }),
        allocation_request_version='1.35')
    second = objects.Selection(
        service_host="host2",
        nodename="node2",
        cell_uuid=uuids.cell1,
        allocation_request=jsonutils.dumps({
            "allocations": {uuids.host2: resources},
            "mappings": {uuids.port1: [uuids.host2]}
        }),
        allocation_request_version='1.35')
    # host1 runs an old (38) service; host2 is new enough (39).
    first_service = objects.Service(service_host='host1')
    first_service.version = 38
    second_service = objects.Service(service_host='host2')
    second_service.version = 39
    mock_get_service.side_effect = [first_service, second_service]

    # set up task for re-schedule
    task.host_list = [first, second]

    selected = task._reschedule()

    self.assertEqual(second, selected)
    self.assertEqual([], task.host_list)
    mock_get_service.assert_has_calls([
        mock.call(task.context, 'host1', 'nova-compute'),
        mock.call(task.context, 'host2', 'nova-compute'),
    ])
    # Resources are claimed only on the host that was actually selected.
    mock_claim_resources.assert_called_once_with(
        self.context.elevated(), task.reportclient, task.request_spec,
        self.instance.uuid,
        {
            "allocations": {uuids.host2: resources},
            "mappings": {uuids.port1: [uuids.host2]}
        }, '1.35')
    # The skipped old host is logged at debug level.
    mock_debug.assert_has_calls([
        mock.call(
            'Scheduler returned alternate host %(host)s as a possible '
            'migration target for re-schedule but that host is not '
            'new enough to support the migration with resource '
            'request %(request)s. Trying another alternate.',
            {'host': 'host1',
             'request': self.request_spec.requested_resources},
            instance=self.instance),
    ])