Example #1
    def test_resources_from_request_spec_aggregates(self):
        destination = objects.Destination()
        flavor = objects.Flavor(vcpus=1, memory_mb=1024,
                                root_gb=1, ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor,
                                      requested_destination=destination)

        destination.require_aggregates(['foo', 'bar'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([('foo', 'bar',)],
                         req.get_request_group(None).member_of)

        destination.require_aggregates(['baz'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([('foo', 'bar'), ('baz',)],
                         req.get_request_group(None).member_of)

        # Test stringification
        self.assertEqual(
            'RequestGroup(use_same_provider=False, '
            'resources={DISK_GB:1, MEMORY_MB:1024, VCPU:1}, '
            'traits=[], '
            'aggregates=[[baz], [foo, bar]])',
            str(req))
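The member_of value asserted above is a list of tuples: each tuple means "any
of these aggregates", and every tuple in the list must be satisfied. As a
hedged sketch (not Nova code), such tuples could be rendered into
placement-style member_of query parameters like this:

    from urllib.parse import urlencode

    def member_of_to_qs(member_of):
        # Each tuple becomes one "member_of=in:<agg>,<agg>" parameter.
        return urlencode(
            [('member_of', 'in:' + ','.join(aggs)) for aggs in member_of])

    print(member_of_to_qs([('foo', 'bar'), ('baz',)]))
    # member_of=in%3Afoo%2Cbar&member_of=in%3Abaz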
Example #2
    def test_resources_from_request_spec_no_aggregates(self):
        flavor = objects.Flavor(vcpus=1,
                                memory_mb=1024,
                                root_gb=1,
                                ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor)

        req = utils.resources_from_request_spec(self.context, reqspec,
                                                self.mock_host_manager)
        self.assertEqual([], req.get_request_group(None).aggregates)

        reqspec.requested_destination = None
        req = utils.resources_from_request_spec(self.context, reqspec,
                                                self.mock_host_manager)
        self.assertEqual([], req.get_request_group(None).aggregates)

        reqspec.requested_destination = objects.Destination()
        req = utils.resources_from_request_spec(self.context, reqspec,
                                                self.mock_host_manager)
        self.assertEqual([], req.get_request_group(None).aggregates)

        reqspec.requested_destination.aggregates = None
        req = utils.resources_from_request_spec(self.context, reqspec,
                                                self.mock_host_manager)
        self.assertEqual([], req.get_request_group(None).aggregates)
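This variant calls resources_from_request_spec with a context and a host
manager, so it relies on fixtures created in setUp, which the listing omits.
A hedged sketch of what that setup might look like (the class name and
fixture values are assumptions):

    import mock

    from nova import context as nova_context

    def setUp(self):
        super(TestUtils, self).setUp()
        self.context = nova_context.RequestContext('fake-user',
                                                   'fake-project')
        # A bare Mock stands in for the HostManager; Mock auto-creates any
        # method the code under test calls, such as
        # get_compute_nodes_by_host_or_node.
        self.mock_host_manager = mock.Mock()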
Example #3
    def test_resources_from_request_spec_requested_resources_unfilled(self):
        flavor = objects.Flavor(vcpus=1,
                                memory_mb=1024,
                                root_gb=10,
                                ephemeral_gb=5,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor)
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual({
            'MEMORY_MB': 1024,
            'DISK_GB': 15,
            'VCPU': 1
        },
                         req.get_request_group(None).resources)
        self.assertEqual(1, len(list(req.resource_groups())))

        reqspec = objects.RequestSpec(flavor=flavor, requested_resources=[])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual({
            'MEMORY_MB': 1024,
            'DISK_GB': 15,
            'VCPU': 1
        },
                         req.get_request_group(None).resources)
        self.assertEqual(1, len(list(req.resource_groups())))
Example #4
    def test_resources_from_request_spec_aggregates(self):
        destination = objects.Destination()
        flavor = objects.Flavor(vcpus=1,
                                memory_mb=1024,
                                root_gb=1,
                                ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor,
                                      requested_destination=destination)

        destination.require_aggregates(['foo', 'bar'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([(
            'foo',
            'bar',
        )],
                         req.get_request_group(None).member_of)

        destination.require_aggregates(['baz'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([('foo', 'bar'), ('baz', )],
                         req.get_request_group(None).member_of)

        # Test stringification
        self.assertEqual(
            'RequestGroup(use_same_provider=False, '
            'resources={DISK_GB:1, MEMORY_MB:1024, VCPU:1}, '
            'traits=[], '
            'aggregates=[[baz], [foo, bar]])', str(req))
Example #5
 def test_process_extra_specs_granular_not_called(self, mock_proc):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0)
     fake_spec = objects.RequestSpec(flavor=flavor)
     utils.resources_from_request_spec(fake_spec)
     mock_proc.assert_not_called()
Example #6
 def test_process_extra_specs_not_called(self, mock_proc):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0)
     fake_spec = objects.RequestSpec(flavor=flavor)
     utils.resources_from_request_spec(fake_spec)
     mock_proc.assert_not_called()
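In the two tests above, mock_proc is injected by a @mock.patch decorator that
the listing does not show. A hedged reconstruction (the patch target is an
assumption inferred from the test names, not confirmed by the listing):

    import mock

    @mock.patch("nova.scheduler.utils._process_extra_specs")  # assumed target
    def test_process_extra_specs_not_called(self, mock_proc):
        ...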
Example #7
 def test_process_extra_specs_granular_called(self, mock_proc):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
     fake_spec = objects.RequestSpec(flavor=flavor)
     utils.resources_from_request_spec(fake_spec)
     mock_proc.assert_called_once()
Example #8
 def test_process_missing_extra_specs_value(self):
     flavor = objects.Flavor(
             vcpus=1,
             memory_mb=1024,
             root_gb=10,
             ephemeral_gb=5,
             swap=0,
             extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
     fake_spec = objects.RequestSpec(flavor=flavor)
     utils.resources_from_request_spec(fake_spec)
Example #9
 def test_process_missing_extra_specs_value(self):
     flavor = objects.Flavor(
         vcpus=1,
         memory_mb=1024,
         root_gb=10,
         ephemeral_gb=5,
         swap=0,
         extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
     fake_spec = objects.RequestSpec(flavor=flavor)
     utils.resources_from_request_spec(fake_spec)
Example #10
 def test_process_extra_specs_called(self, mock_proc):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
     fake_spec = objects.RequestSpec(flavor=flavor)
     utils.resources_from_request_spec(fake_spec)
     mock_proc.assert_called_once()
Example #11
 def test_get_resources_from_request_spec_bad_value(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={"resources:MEMORY_MB": "bogus"})
     fake_spec = objects.RequestSpec(flavor=flavor)
     with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
         utils.resources_from_request_spec(fake_spec)
         mock_log.assert_called_once()
Example #12
 def test_get_resources_from_request_spec_zero_cust_amt(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={"resources:CUSTOM_TEST_CLASS": 0})
     fake_spec = objects.RequestSpec(flavor=flavor)
     with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
         utils.resources_from_request_spec(fake_spec)
         mock_log.assert_called_once()
Example #13
 def test_get_resources_from_request_spec_zero_cust_amt(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={
                                 "resources:CUSTOM_TEST_CLASS": 0})
     fake_spec = objects.RequestSpec(flavor=flavor)
     with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
         utils.resources_from_request_spec(fake_spec)
         mock_log.assert_called_once()
Example #14
 def test_get_resources_from_request_spec_bad_value(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={
                                 "resources:MEMORY_MB": "bogus"})
     fake_spec = objects.RequestSpec(flavor=flavor)
     with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
         utils.resources_from_request_spec(fake_spec)
         mock_log.assert_called_once()
Example #15
 def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
     """Tests that there is no limit applied to the
     GET /allocation_candidates query string if a given scheduler hint
     is in the request spec.
     """
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=15,
                             ephemeral_gb=0,
                             swap=0)
     fake_spec = objects.RequestSpec(
         flavor=flavor, scheduler_hints=hints)
     expected = utils.ResourceRequest()
     expected._rg_by_id[None] = objects.RequestGroup(
         use_same_provider=False,
         resources={
             'VCPU': 1,
             'MEMORY_MB': 1024,
             'DISK_GB': 15,
         },
     )
     expected._limit = None
     resources = utils.resources_from_request_spec(fake_spec)
     self.assertResourceRequestsEqual(expected, resources)
     expected_querystring = (
         'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
     )
     self.assertEqual(expected_querystring, resources.to_querystring())
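The hints parameter above is supplied by a parametrizing decorator that the
listing omits. A hedged reconstruction using the ddt library (the concrete
hint values and UUID are illustrative assumptions):

    import ddt

    from nova import test

    @ddt.ddt
    class TestUtils(test.NoDBTestCase):

        @ddt.data({'group': ['foo']},
                  {'same_host': ['b1fa9b38-ecce-4fd8-b3ce-fb71c8b5b0da']},
                  {'different_host': ['b1fa9b38-ecce-4fd8-b3ce-fb71c8b5b0da']})
        def test_resources_from_request_spec_no_limit_based_on_hint(
                self, hints):
            ...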
Example #16
 def test_get_resources_from_request_spec_bad_std_resource_class(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={
                                 "resources:DOESNT_EXIST": 0})
     fake_spec = objects.RequestSpec(flavor=flavor)
     with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
         utils.resources_from_request_spec(fake_spec)
         mock_log.assert_called_once()
         args = mock_log.call_args[0]
         self.assertEqual(args[0], "Received an invalid ResourceClass "
                 "'%(key)s' in extra_specs.")
         self.assertEqual(args[1], {"key": "DOESNT_EXIST"})
Example #17
 def test_get_resources_from_request_spec_bad_std_resource_class(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=10,
                             ephemeral_gb=5,
                             swap=0,
                             extra_specs={"resources:DOESNT_EXIST": 0})
     fake_spec = objects.RequestSpec(flavor=flavor)
     with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
         utils.resources_from_request_spec(fake_spec)
         mock_log.assert_called_once()
         args = mock_log.call_args[0]
         self.assertEqual(
             args[0], "Received an invalid ResourceClass "
             "'%(key)s' in extra_specs.")
         self.assertEqual(args[1], {"key": "DOESNT_EXIST"})
Example #18
 def _test_resources_from_request_spec(self, expected, flavor,
                                       image=objects.ImageMeta()):
     fake_spec = objects.RequestSpec(flavor=flavor, image=image)
     resources = utils.resources_from_request_spec(
         self.context, fake_spec, self.mock_host_manager)
     self.assertResourceRequestsEqual(expected, resources)
     return resources
Example #19
 def _test_resources_from_request_spec(self,
                                       expected,
                                       flavor,
                                       image=objects.ImageMeta()):
     fake_spec = objects.RequestSpec(flavor=flavor, image=image)
     resources = utils.resources_from_request_spec(fake_spec)
     self.assertResourceRequestsEqual(expected, resources)
Example #20
 def test_process_use_force_hosts(self):
     fake_nodes = objects.ComputeNodeList(objects=[
         objects.ComputeNode(host='fake-host',
                             uuid='12345678-1234-1234-1234-123456789012')
     ])
     self.mock_host_manager.get_compute_nodes_by_host_or_node.\
         return_value = fake_nodes
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=15,
                             ephemeral_gb=0,
                             swap=0)
     fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
     expected = utils.ResourceRequest()
     expected._rg_by_id[None] = objects.RequestGroup(
         use_same_provider=False,
         resources={
             'VCPU': 1,
             'MEMORY_MB': 1024,
             'DISK_GB': 15,
         },
         in_tree='12345678-1234-1234-1234-123456789012',
     )
     resources = utils.resources_from_request_spec(self.context, fake_spec,
                                                   self.mock_host_manager)
     self.assertResourceRequestsEqual(expected, resources)
     expected_querystring = (
         'in_tree=12345678-1234-1234-1234-123456789012&'
         'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
     self.assertEqual(expected_querystring, resources.to_querystring())
     self.mock_host_manager.get_compute_nodes_by_host_or_node.\
         assert_called_once_with(self.context, 'test', None, cell=None)
Example #21
 def test_process_use_force_hosts(self):
     fake_nodes = objects.ComputeNodeList(objects=[
         objects.ComputeNode(host='test',
                             uuid='12345678-1234-1234-1234-123456789012')
         ])
     self.mock_host_manager.get_compute_nodes_by_host_or_node.\
         return_value = fake_nodes
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=15,
                             ephemeral_gb=0,
                             swap=0)
     fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
     expected = utils.ResourceRequest()
     expected._rg_by_id[None] = objects.RequestGroup(
         use_same_provider=False,
         resources={
             'VCPU': 1,
             'MEMORY_MB': 1024,
             'DISK_GB': 15,
         },
         in_tree='12345678-1234-1234-1234-123456789012',
     )
     resources = utils.resources_from_request_spec(
             self.context, fake_spec, self.mock_host_manager)
     self.assertResourceRequestsEqual(expected, resources)
     expected_querystring = (
         'in_tree=12345678-1234-1234-1234-123456789012&'
         'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
     self.assertEqual(expected_querystring, resources.to_querystring())
     self.mock_host_manager.get_compute_nodes_by_host_or_node.\
         assert_called_once_with(self.context, 'test', None, cell=None)
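As a quick sanity check of the percent-encoded querystring asserted above,
%3A decodes to ':' and %2C to ',', so the resources parameter is the usual
"RESOURCE_CLASS:amount" list:

    from urllib.parse import unquote

    qs = ('in_tree=12345678-1234-1234-1234-123456789012&'
          'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
    print(unquote(qs))
    # in_tree=12345678-1234-1234-1234-123456789012&limit=1000&
    # resources=DISK_GB:15,MEMORY_MB:1024,VCPU:1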
Example #22
 def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
     """Tests that there is no limit applied to the
     GET /allocation_candidates query string if a given scheduler hint
     is in the request spec.
     """
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=15,
                             ephemeral_gb=0,
                             swap=0)
     fake_spec = objects.RequestSpec(
         flavor=flavor, scheduler_hints=hints)
     expected = utils.ResourceRequest()
     expected._rg_by_id[None] = objects.RequestGroup(
         use_same_provider=False,
         resources={
             'VCPU': 1,
             'MEMORY_MB': 1024,
             'DISK_GB': 15,
         },
     )
     expected._limit = None
     resources = utils.resources_from_request_spec(
         self.context, fake_spec, self.mock_host_manager)
     self.assertResourceRequestsEqual(expected, resources)
     expected_querystring = (
         'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
     )
     self.assertEqual(expected_querystring, resources.to_querystring())
Example #23
    def test_resources_from_request_spec_aggregates(self):
        destination = objects.Destination()
        flavor = objects.Flavor(vcpus=1, memory_mb=1024,
                                root_gb=1, ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor,
                                      requested_destination=destination)

        destination.require_aggregates(['foo', 'bar'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([['foo', 'bar']],
                         req.get_request_group(None).aggregates)

        destination.require_aggregates(['baz'])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([['foo', 'bar'], ['baz']],
                         req.get_request_group(None).aggregates)
Example #24
    def test_resources_from_request_spec_requested_resources_unfilled(self):
        flavor = objects.Flavor(
                vcpus=1,
                memory_mb=1024,
                root_gb=10,
                ephemeral_gb=5,
                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor)
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
                         req.get_request_group(None).resources)
        self.assertEqual(1, len(list(req.resource_groups())))

        reqspec = objects.RequestSpec(flavor=flavor, requested_resources=[])
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
                         req.get_request_group(None).resources)
        self.assertEqual(1, len(list(req.resource_groups())))
Example #25
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None):
        """Returns destinations(s) best suited for this RequestSpec.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)
        resources = utils.resources_from_request_spec(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries = None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries = None, None
            else:
                alloc_reqs, provider_summaries = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")
                # TODO(jaypipes): Setting provider_summaries to None triggers
                # the scheduler to load all compute nodes to do scheduling "the
                # old way".  Really, we should raise NoValidHosts here, but all
                # functional tests will fall over if we do that without
                # changing the PlacementFixture to load compute node inventory
                # into the placement database before starting functional tests.
                provider_summaries = None
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rr in ar['allocations']:
                        rp_uuid = rr['resource_provider']['uuid']
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
                                                alloc_reqs_by_rp_uuid,
                                                provider_summaries)
        dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
        return jsonutils.to_primitive(dest_dicts)
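The grouping loop at the end of this method expects the older list-based
allocation request format, where each entry in ar['allocations'] nests a
resource_provider dict. A self-contained illustration with a placeholder
provider ID:

    import collections

    alloc_reqs = [
        {'allocations': [
            {'resource_provider': {'uuid': 'rp-1'},
             'resources': {'VCPU': 1, 'MEMORY_MB': 1024}},
        ]},
    ]
    alloc_reqs_by_rp_uuid = collections.defaultdict(list)
    for ar in alloc_reqs:
        for rr in ar['allocations']:
            alloc_reqs_by_rp_uuid[rr['resource_provider']['uuid']].append(ar)
    # alloc_reqs_by_rp_uuid['rp-1'] now lists every allocation request that
    # involves that provider, so claiming against a chosen host is a lookup.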
Example #26
 def test_process_no_force_hosts_or_force_nodes(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=15,
                             ephemeral_gb=0,
                             swap=0)
     fake_spec = objects.RequestSpec(flavor=flavor)
     expected = utils.ResourceRequest()
     resources = utils.resources_from_request_spec(fake_spec)
     self.assertEqual(expected._limit, resources._limit)
Example #27
    def test_resources_from_request_spec_no_aggregates(self):
        flavor = objects.Flavor(vcpus=1, memory_mb=1024,
                                root_gb=1, ephemeral_gb=0,
                                swap=0)
        reqspec = objects.RequestSpec(flavor=flavor)

        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).member_of)

        reqspec.requested_destination = None
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).member_of)

        reqspec.requested_destination = objects.Destination()
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).member_of)

        reqspec.requested_destination.aggregates = None
        req = utils.resources_from_request_spec(reqspec)
        self.assertEqual([], req.get_request_group(None).member_of)
Example #28
    def select_destinations(self, ctxt,
                            request_spec=None, filter_properties=None,
                            spec_obj=_sentinel, instance_uuids=None):
        """Returns destinations(s) best suited for this RequestSpec.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(ctxt,
                                                           request_spec,
                                                           filter_properties)
        resources = utils.resources_from_request_spec(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries = None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries = None, None
            else:
                alloc_reqs, provider_summaries = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")
                # TODO(jaypipes): Setting provider_summaries to None triggers
                # the scheduler to load all compute nodes to do scheduling "the
                # old way".  Really, we should raise NoValidHosts here, but all
                # functional tests will fall over if we do that without
                # changing the PlacementFixture to load compute node inventory
                # into the placement database before starting functional tests.
                provider_summaries = None
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rr in ar['allocations']:
                        rp_uuid = rr['resource_provider']['uuid']
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
            alloc_reqs_by_rp_uuid, provider_summaries)
        dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
        return jsonutils.to_primitive(dest_dicts)
Example #29
 def _get_all_host_states(self, context, spec_obj):
     """Template method, so a subclass can implement caching."""
     resources = utils.resources_from_request_spec(spec_obj)
     filters = {'resources': resources}
     reportclient = self.scheduler_client.reportclient
     rps = reportclient.get_filtered_resource_providers(filters)
     # NOTE(sbauza): In case the Placement service is not running yet or
     # when returning an exception, we wouldn't get any resource providers.
     # If so, let's return an empty list so _schedule would raise a
     # NoValidHosts.
     if not rps:
         return []
     compute_uuids = [rp['uuid'] for rp in rps]
     return self.host_manager.get_host_states_by_uuids(
         context, compute_uuids, spec_obj)
Example #30
 def test_resources_from_request_spec_having_requested_resources(self):
     flavor = objects.Flavor(
             vcpus=1,
             memory_mb=1024,
             root_gb=10,
             ephemeral_gb=5,
             swap=0)
     rg1 = objects.RequestGroup()
     rg2 = objects.RequestGroup()
     reqspec = objects.RequestSpec(flavor=flavor,
                                   requested_resources=[rg1, rg2])
     req = utils.resources_from_request_spec(reqspec)
     self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
                      req.get_request_group(None).resources)
     self.assertIs(rg1, req.get_request_group(1))
     self.assertIs(rg2, req.get_request_group(2))
Example #31
 def test_process_use_force_hosts(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=15,
                             ephemeral_gb=0,
                             swap=0)
     fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
     expected = utils.ResourceRequest()
     expected._rg_by_id[None] = objects.RequestGroup(
         use_same_provider=False,
         resources={
             'VCPU': 1,
             'MEMORY_MB': 1024,
             'DISK_GB': 15,
         },
     )
     expected._limit = None
     resources = utils.resources_from_request_spec(fake_spec)
     self.assertResourceRequestsEqual(expected, resources)
     expected_querystring = (
         'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
     self.assertEqual(expected_querystring, resources.to_querystring())
Example #32
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None):
        """Returns destinations(s) best suited for this RequestSpec.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)
        resources = utils.resources_from_request_spec(spec_obj)
        alloc_reqs, p_sums = None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(resources)
            alloc_reqs, p_sums = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")
                # TODO(jaypipes): Setting p_sums to None triggers the scheduler
                # to load all compute nodes to do scheduling "the old way".
                # Really, we should raise NoValidHosts here, but all functional
                # tests will fall over if we do that without changing the
                # PlacementFixture to load compute node inventory into the
                # placement database before starting functional tests.
                p_sums = None

        dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
                                                p_sums)
        dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
        return jsonutils.to_primitive(dest_dicts)
Example #33
 def test_process_use_force_hosts(self):
     flavor = objects.Flavor(vcpus=1,
                             memory_mb=1024,
                             root_gb=15,
                             ephemeral_gb=0,
                             swap=0)
     fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
     expected = utils.ResourceRequest()
     expected._rg_by_id[None] = plib.RequestGroup(
         use_same_provider=False,
         resources={
             'VCPU': 1,
             'MEMORY_MB': 1024,
             'DISK_GB': 15,
         },
     )
     expected._limit = None
     resources = utils.resources_from_request_spec(fake_spec)
     self.assertResourceRequestsEqual(expected, resources)
     expected_querystring = (
         'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
     )
     self.assertEqual(expected_querystring, resources.to_querystring())
Example #34
    def select_destinations(self, ctxt, request_spec=None,
            filter_properties=None, spec_obj=_sentinel, instance_uuids=None,
            return_objects=False, return_alternates=False):
        """Returns destinations(s) best suited for this RequestSpec.

        Starting in Queens, this method returns a list of lists of Selection
        objects, with one list for each requested instance. Each instance's
        list will have its first element be the Selection object representing
        the chosen host for the instance, and if return_alternates is True,
        zero or more alternate objects that could also satisfy the request. The
        number of alternates is determined by the configuration option
        `CONF.scheduler.max_attempts`.

        The ability of a calling method to handle this format of returned
        destinations is indicated by a True value in the parameter
        `return_objects`. However, there may still be some older conductors in
        a deployment that have not been updated to Queens, and in that case
        return_objects will be False, and the result will be a list of dicts
        with 'host', 'nodename' and 'limits' as keys. When return_objects is
        False, the value of return_alternates has no effect. The reason there
        are two kwarg parameters return_objects and return_alternates is so we
        can differentiate between callers that understand the Selection object
        format but *don't* want to get alternate hosts, as is the case with the
        conductors that handle certain move operations.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(ctxt,
                                                           request_spec,
                                                           filter_properties)

        is_rebuild = utils.request_is_rebuild(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if self.driver.USES_ALLOCATION_CANDIDATES and not is_rebuild:
            # Only process the Placement request spec filters when Placement
            # is used.
            try:
                request_filter.process_reqspec(ctxt, spec_obj)
            except exception.RequestFilterFailed as e:
                raise exception.NoValidHost(reason=e.message)

            resources = utils.resources_from_request_spec(
                ctxt, spec_obj, self.driver.host_manager)
            res = self.placement_client.get_allocation_candidates(ctxt,
                                                                  resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries, allocation_request_version = (
                        None, None, None)
            else:
                (alloc_reqs, provider_summaries,
                 allocation_request_version) = res
            if not alloc_reqs:
                LOG.info("Got no allocation candidates from the Placement "
                         "API. This could be due to insufficient resources "
                         "or a temporary occurrence as compute nodes start "
                         "up.")
                raise exception.NoValidHost(reason="")
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rp_uuid in ar['allocations']:
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        # Only return alternates if both return_objects and return_alternates
        # are True.
        return_alternates = return_alternates and return_objects
        selections = self.driver.select_destinations(ctxt, spec_obj,
                instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries,
                allocation_request_version, return_alternates)
        # If `return_objects` is False, we need to convert the selections to
        # the older format, which is a list of host state dicts.
        if not return_objects:
            selection_dicts = [sel[0].to_dict() for sel in selections]
            return jsonutils.to_primitive(selection_dicts)
        return selections
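Unlike the earlier examples, the grouping loop above iterates dict keys
("for rp_uuid in ar['allocations']"), matching the newer placement response
format in which allocations are keyed by provider UUID (introduced around
placement microversion 1.12). An illustrative shape with a placeholder UUID:

    ar = {'allocations': {
        'a1b2c3d4-0000-0000-0000-000000000000': {
            'resources': {'VCPU': 1, 'MEMORY_MB': 1024},
        },
    }}
    for rp_uuid in ar['allocations']:
        print(rp_uuid)  # the provider UUID key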
Example #35
    def select_destinations(self, context, spec_obj, instance_uuids,
                            alloc_reqs_by_rp_uuid, provider_summaries):
        """Returns a sorted list of HostState objects that satisfy the
        supplied request_spec.

        These hosts will have already had their resources claimed in Placement.

        :param context: The RequestContext object
        :param spec_obj: The RequestSpec object
        :param instance_uuids: List of UUIDs, one for each value of the spec
                               object's num_instances attribute
        :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider
                                      UUID, of the allocation requests that may
                                      be used to claim resources against
                                      matched hosts. If None, indicates either
                                      the placement API wasn't reachable or
                                      that there were no allocation requests
                                      returned by the placement API. If the
                                      latter, the provider_summaries will be an
                                      empty dict, not None.
        :param provider_summaries: Optional dict, keyed by resource provider
                                   UUID, of information that will be used by
                                   the filters/weighers in selecting matching
                                   hosts for a request. If None, indicates that
                                   the scheduler driver should grab all compute
                                   node information locally and that the
                                   Placement API is not used. If an empty dict,
                                   indicates the Placement API returned no
                                   potential matches for the requested
                                   resources.
        """
        self.notifier.info(
            context, 'scheduler.select_destinations.start',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))

        # NOTE(sbauza): The RequestSpec.num_instances field contains the number
        # of instances created when the RequestSpec was used to first boot some
        # instances. This is incorrect when doing a move or resize operation,
        # so prefer the length of instance_uuids unless it is None.
        num_instances = (len(instance_uuids)
                         if instance_uuids else spec_obj.num_instances)

        # WRS: check against minimum number of instances for success if set
        #      otherwise default to num_instances
        if hasattr(spec_obj, 'min_num_instances'):
            task_state = spec_obj.scheduler_hints.get('task_state')
            # task_state set to None indicates this is not for migration
            if task_state is None:
                num_instances = spec_obj.min_num_instances

        selected_hosts = self._schedule(context, spec_obj, instance_uuids,
                                        alloc_reqs_by_rp_uuid,
                                        provider_summaries)

        # Couldn't fulfill the request_spec
        if len(selected_hosts) < num_instances:
            # NOTE(Rui Chen): If multiple creates failed, set the updated time
            # of selected HostState to None so that these HostStates are
            # refreshed according to database in next schedule, and release
            # the resource consumed by instance in the process of selecting
            # host.
            for host in selected_hosts:
                host.updated = None

            # Log the details but don't put those into the reason since
            # we don't want to give away too much information about our
            # actual environment.
            LOG.debug(
                'There are %(hosts)d hosts available but '
                '%(num_instances)d instances requested to build.', {
                    'hosts': len(selected_hosts),
                    'num_instances': num_instances
                })

            # Determine normalized resource allocation request required to do
            # placement query.
            resources = scheduler_utils.resources_from_request_spec(spec_obj)
            empty_computenode = objects.ComputeNode(
                numa_topology=objects.NUMATopology(
                    cells=[objects.NUMACell(siblings=[])])._to_json())
            normalized_resources = \
                scheduler_utils.normalized_resources_for_placement_claim(
                    resources, empty_computenode,
                    spec_obj.flavor.vcpus,
                    spec_obj.flavor.extra_specs,
                    spec_obj.image.properties,
                    spec_obj.numa_topology)

            # Determine the rejection reasons for all hosts based on
            # placement vcpu, memory, and disk criteria. This is done
            # after-the-fact since the placement query does not return
            # any reasons.
            reasons = self.placement_client.get_rejection_reasons(
                requested=normalized_resources)
            if reasons is None:
                reasons = {}

            # Populate per-host rejection map based on placement criteria.
            host_states = self.host_manager.get_all_host_states(context)
            for host_state in host_states:
                if host_state.uuid in reasons:
                    msg = reasons[host_state.uuid]
                    if msg:
                        utils.filter_reject('Placement',
                                            host_state,
                                            spec_obj,
                                            msg,
                                            append=False)

            # WRS - failure message
            pp = pprint.PrettyPrinter(indent=1)
            # Strip the '_obj_' prefix from the OVO attribute names. Note
            # that str.lstrip() would be wrong here: it removes a leading
            # *set* of characters, not a prefix.
            spec_ = {
                k[len('_obj_'):]: v
                for k, v in spec_obj.__dict__.items()
                if k.startswith('_obj_')
            }
            LOG.warning(
                'CANNOT SCHEDULE:  %(num)s available out of '
                '%(req)s requested.  spec_obj=\n%(spec)s', {
                    'num': len(selected_hosts),
                    'req': num_instances,
                    'spec': pp.pformat(spec_),
                })
            reason = _('There are not enough hosts available.')
            filter_properties = spec_obj.to_legacy_filter_properties_dict()
            scheduler_utils.NoValidHost_extend(filter_properties,
                                               reason=reason)
        else:
            # WRS - success message
            LOG.info(
                'SCHED: PASS. Selected %(hosts)s, uuid=%(uuid)s, '
                'name=%(name)s, display_name=%(display_name)s, '
                'scheduled=%(num)s', {
                    'hosts': selected_hosts,
                    'uuid': spec_obj.instance_uuid,
                    'name': spec_obj.name,
                    'display_name': spec_obj.display_name,
                    'num': len(selected_hosts)
                })

        self.notifier.info(
            context, 'scheduler.select_destinations.end',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
        return selected_hosts
Example #36
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None):
        """Returns destinations(s) best suited for this RequestSpec.

        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)
        resources = utils.resources_from_request_spec(spec_obj)

        # WRS: Determine resources consumed for placement candidate check,
        vcpus = spec_obj.flavor.vcpus
        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties

        # WRS: The request_spec has stale numa_topology, so must be updated.
        # We can get stale numa_topology if we do an evacuation or
        # live-migration after a resize,
        instance_type = spec_obj.flavor
        image_meta = objects.ImageMeta(properties=image_props)
        try:
            spec_obj.numa_topology = \
                hardware.numa_get_constraints(instance_type, image_meta)
        except Exception as ex:
            LOG.error("Cannot get numa constraints, error=%(err)r",
                      {'err': ex})

        instance_numa_topology = spec_obj.numa_topology
        # WRS: If cpu_thread_policy is ISOLATE and compute has hyperthreading
        # enabled, vcpus claim will be double flavor.vcpus.  Since we don't
        # know the compute node at this point, we'll just request flavor.vcpus
        # and let the numa_topology filter sort this out.
        numa_cell = objects.NUMACell(siblings=[])
        numa_topology = objects.NUMATopology(cells=[numa_cell])._to_json()
        computenode = objects.ComputeNode(numa_topology=numa_topology)
        normalized_resources = \
            utils.normalized_resources_for_placement_claim(
                resources, computenode, vcpus, extra_specs,
                image_props, instance_numa_topology)

        alloc_reqs_by_rp_uuid, provider_summaries = None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(
                normalized_resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries = None, None
            else:
                alloc_reqs, provider_summaries = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")

                # Determine the rejection reasons for all hosts based on
                # placement vcpu, memory, and disk criteria. This is done
                # after-the-fact since the placement query does not return
                # any reasons.
                reasons = self.placement_client.get_rejection_reasons(
                    requested=normalized_resources)
                if reasons is None:
                    reasons = {}

                # Populate per-host rejection map based on placement criteria.
                host_states = self.driver.host_manager.get_all_host_states(
                    ctxt)
                for host_state in host_states:
                    if host_state.uuid in reasons:
                        msg = reasons[host_state.uuid]
                        if msg:
                            nova_utils.filter_reject('Placement',
                                                     host_state,
                                                     spec_obj,
                                                     msg,
                                                     append=False)

                reason = 'Placement service found no hosts.'
                filter_properties = spec_obj.to_legacy_filter_properties_dict()
                utils.NoValidHost_extend(filter_properties, reason=reason)
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rr in ar['allocations']:
                        rp_uuid = rr['resource_provider']['uuid']
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
                                                alloc_reqs_by_rp_uuid,
                                                provider_summaries)
        dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
        return jsonutils.to_primitive(dest_dicts)
Example #37
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None,
                            return_objects=False,
                            return_alternates=False):
        """Returns destinations(s) best suited for this RequestSpec.

        Starting in Queens, this method returns a list of lists of Selection
        objects, with one list for each requested instance. Each instance's
        list will have its first element be the Selection object representing
        the chosen host for the instance, and if return_alternates is True,
        zero or more alternate objects that could also satisfy the request. The
        number of alternates is determined by the configuration option
        `CONF.scheduler.max_attempts`.

        The ability of a calling method to handle this format of returned
        destinations is indicated by a True value in the parameter
        `return_objects`. However, there may still be some older conductors in
        a deployment that have not been updated to Queens, and in that case
        return_objects will be False, and the result will be a list of dicts
        with 'host', 'nodename' and 'limits' as keys. When return_objects is
        False, the value of return_alternates has no effect. The reason there
        are two kwarg parameters return_objects and return_alternates is so we
        can differentiate between callers that understand the Selection object
        format but *don't* want to get alternate hosts, as is the case with the
        conductors that handle certain move operations.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)

        is_rebuild = utils.request_is_rebuild(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if not is_rebuild:
            try:
                request_filter.process_reqspec(ctxt, spec_obj)
            except exception.RequestFilterFailed as e:
                raise exception.NoValidHost(reason=e.message)

            resources = utils.resources_from_request_spec(
                ctxt,
                spec_obj,
                self.driver.host_manager,
                enable_pinning_translate=True)
            res = self.placement_client.get_allocation_candidates(
                ctxt, resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                res = None, None, None

            alloc_reqs, provider_summaries, allocation_request_version = res
            alloc_reqs = alloc_reqs or []
            provider_summaries = provider_summaries or {}

            # if the user requested pinned CPUs, we make a second query to
            # placement for allocation candidates using VCPUs instead of PCPUs.
            # This is necessary because users might not have modified all (or
            # any) of their compute nodes meaning said compute nodes will not
            # be reporting PCPUs yet. This is okay to do because the
            # NUMATopologyFilter (scheduler) or virt driver (compute node) will
            # weed out hosts that are actually using new style configuration
            # but simply don't have enough free PCPUs (or any PCPUs).
            # TODO(stephenfin): Remove when we drop support for 'vcpu_pin_set'
            if (resources.cpu_pinning_requested
                    and not CONF.workarounds.disable_fallback_pcpu_query):
                LOG.debug('Requesting fallback allocation candidates with '
                          'VCPU instead of PCPU')
                resources = utils.resources_from_request_spec(
                    ctxt,
                    spec_obj,
                    self.driver.host_manager,
                    enable_pinning_translate=False)
                res = self.placement_client.get_allocation_candidates(
                    ctxt, resources)
                if res:
                    # merge the allocation requests and provider summaries from
                    # the two requests together
                    alloc_reqs_fallback, provider_summaries_fallback, _ = res

                    alloc_reqs.extend(alloc_reqs_fallback)
                    provider_summaries.update(provider_summaries_fallback)

            if not alloc_reqs:
                LOG.info("Got no allocation candidates from the Placement "
                         "API. This could be due to insufficient resources "
                         "or a temporary occurrence as compute nodes start "
                         "up.")
                raise exception.NoValidHost(reason="")
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rp_uuid in ar['allocations']:
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        # Only return alternates if both return_objects and return_alternates
        # are True.
        return_alternates = return_alternates and return_objects
        selections = self.driver.select_destinations(
            ctxt, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
            provider_summaries, allocation_request_version, return_alternates)
        # If `return_objects` is False, we need to convert the selections to
        # the older format, which is a list of host state dicts.
        if not return_objects:
            selection_dicts = [sel[0].to_dict() for sel in selections]
            return jsonutils.to_primitive(selection_dicts)
        return selections
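
The dual return format described in the docstring is easy to mishandle on the
caller side. The sketch below is hypothetical code, not part of Nova: the
names `manager`, `ctxt`, `spec_obj`, `instance` and `LOG` are assumed for
illustration. It shows how a caller might consume both shapes:

# Hypothetical caller; assumes `manager` is a SchedulerManager-like object
# and LOG is an oslo.log logger.
selections = manager.select_destinations(
    ctxt, spec_obj=spec_obj, instance_uuids=[instance.uuid],
    return_objects=True, return_alternates=True)

if isinstance(selections[0], list):
    # New (Queens+) format: one list of Selection objects per instance;
    # element 0 is the chosen host, any further elements are alternates.
    chosen = selections[0][0]
    alternates = selections[0][1:]
    LOG.debug('Chose host %s with %d alternate(s)',
              chosen.service_host, len(alternates))
else:
    # Legacy format: a flat list of dicts with 'host', 'nodename' and
    # 'limits' keys (returned when return_objects=False).
    LOG.debug('Chose host %s', selections[0]['host'])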
Example #41
0
    def _test_resources_from_request_spec(self, flavor, expected):
        fake_spec = objects.RequestSpec(flavor=flavor)
        resources = utils.resources_from_request_spec(fake_spec)
        self.assertEqual(expected, resources)
Example #42
0
    def _test_resources_from_request_spec(self, flavor, expected):
        fake_spec = objects.RequestSpec(flavor=flavor)
        resources = utils.resources_from_request_spec(fake_spec)
        self.assertResourceRequestsEqual(expected, resources)
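
Examples #41 and #42 differ only in the assertion: the older helper compares
results with assertEqual, while the newer one needs a custom
assertResourceRequestsEqual because resources_from_request_spec stopped
returning a plain mapping of resource amounts. A hypothetical invocation of
the older, dict-comparing helper follows; the flavor values and the expected
dict are made up for illustration:

# Hypothetical usage (inside the same TestCase class as the helper above).
flavor = objects.Flavor(vcpus=2, memory_mb=2048,
                        root_gb=10, ephemeral_gb=5, swap=1024)
expected = {
    'VCPU': 2,
    'MEMORY_MB': 2048,
    'DISK_GB': 16,  # root_gb (10) + ephemeral_gb (5) + swap (1024 MB = 1 GiB)
}
self._test_resources_from_request_spec(flavor, expected)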
Example #43
0
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None,
                            return_objects=False,
                            return_alternates=False):
        """Returns destinations(s) best suited for this RequestSpec.

        Starting in Queens, this method returns a list of lists of Selection
        objects, with one list for each requested instance. Each instance's
        list will have its first element be the Selection object representing
        the chosen host for the instance, and if return_alternates is True,
        zero or more alternate objects that could also satisfy the request. The
        number of alternates is determined by the configuration option
        `CONF.scheduler.max_attempts`.

        The ability of a calling method to handle this format of returned
        destinations is indicated by a True value in the parameter
        `return_objects`. However, there may still be some older conductors in
        a deployment that have not been updated to Queens, and in that case
        return_objects will be False, and the result will be a list of dicts
        with 'host', 'nodename' and 'limits' as keys. When return_objects is
        False, the value of return_alternates has no effect. The reason there
        are two kwarg parameters return_objects and return_alternates is so we
        can differentiate between callers that understand the Selection object
        format but *don't* want to get alternate hosts, as is the case with the
        conductors that handle certain move operations.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)

        try:
            request_filter.process_reqspec(ctxt, spec_obj)
        except exception.RequestFilterFailed as e:
            raise exception.NoValidHost(reason=e.message)

        resources = utils.resources_from_request_spec(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(
                ctxt, resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries, allocation_request_version = (
                    None, None, None)
            else:
                (alloc_reqs, provider_summaries,
                 allocation_request_version) = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")
                raise exception.NoValidHost(reason="")
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rp_uuid in ar['allocations']:
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        # Only return alternates if both return_objects and return_alternates
        # are True.
        return_alternates = return_alternates and return_objects
        selections = self.driver.select_destinations(
            ctxt, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
            provider_summaries, allocation_request_version, return_alternates)
        # If `return_objects` is False, we need to convert the selections to
        # the older format, which is a list of host state dicts.
        if not return_objects:
            selection_dicts = [sel[0].to_dict() for sel in selections]
            return jsonutils.to_primitive(selection_dicts)
        return selections
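
Both variants end by grouping allocation requests by resource provider UUID,
so that claiming resources against a given host can look up a matching
request directly. A minimal, standalone sketch of that grouping, with
made-up UUIDs and a simplified allocation-request structure:

import collections

# Toy allocation requests in (simplified) placement format; the UUIDs
# and payload shape are assumptions for illustration only.
alloc_reqs = [
    {'allocations': {'rp-uuid-1': {'resources': {'VCPU': 1}}}},
    {'allocations': {'rp-uuid-1': {'resources': {'VCPU': 2}},
                     'rp-uuid-2': {'resources': {'DISK_GB': 10}}}},
]

alloc_reqs_by_rp_uuid = collections.defaultdict(list)
for ar in alloc_reqs:
    for rp_uuid in ar['allocations']:
        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

# rp-uuid-1 appears in both requests, rp-uuid-2 in only one:
assert len(alloc_reqs_by_rp_uuid['rp-uuid-1']) == 2
assert len(alloc_reqs_by_rp_uuid['rp-uuid-2']) == 1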
Example #44
0
from oslo_log import log as logging

import nova.conf
from nova import config
from nova import context as nova_context
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils

CONF = nova.conf.CONF

logging.setup(CONF, 'nova')
LOG = logging.getLogger(__name__)

# Parse the standard Nova configuration so the clients below can find
# the database and Placement endpoints.
argv = []
default_config_files = ['/etc/nova/nova.conf']
config.parse_args(argv, default_config_files=default_config_files)
objects.register_all()
ctxt = nova_context.get_admin_context()

client = scheduler_client.SchedulerClient()
placement_client = client.reportclient

# Load the request spec of an existing instance and ask Placement for
# allocation candidates that could satisfy it.
instance_uuid = 'fdc43c5c-49e1-448b-8cb1-c0d73030697f'
request_spec = objects.RequestSpec.get_by_instance_uuid(ctxt, instance_uuid)
resources = utils.resources_from_request_spec(request_spec)
res = placement_client.get_allocation_candidates(resources)
if res is None:
    # The @safe_connect decorator on get_allocation_candidates returns
    # None when the Placement service cannot be reached.
    raise SystemExit('Could not connect to the Placement service')
alloc_reqs, provider_summaries = res
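
A short, hypothetical continuation of the script that summarizes what came
back. The exact payload layout depends on the placement API microversion in
use, so treat the field access as an assumption:

# Hypothetical continuation; assumes the two-tuple return shown above.
print('Got %d allocation request(s)' % len(alloc_reqs))
for rp_uuid in provider_summaries:
    print('Candidate resource provider: %s' % rp_uuid)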