Example 1
    def test_with_tenant_and_az(self, getmd):
        getmd.side_effect = [
            # Tenant filter
            [objects.Aggregate(
                uuid=uuids.agg1,
                metadata={'filter_tenant_id': 'owner'}),
            objects.Aggregate(
                uuid=uuids.agg2,
                metadata={'filter_tenant_id:12': 'owner'}),
            objects.Aggregate(
                uuid=uuids.agg3,
                metadata={'other_key': 'owner'})],
            # AZ filter
            [objects.Aggregate(
                uuid=uuids.agg4,
                metadata={'availability_zone': 'myaz'})],
        ]
        reqspec = objects.RequestSpec(project_id='owner',
                                      availability_zone='myaz')
        request_filter.process_reqspec(self.context, reqspec)
        self.assertEqual(
            ','.join(sorted([uuids.agg1, uuids.agg2])),
            ','.join(sorted(
                reqspec.requested_destination.aggregates[0].split(','))))
        self.assertEqual(
            ','.join(sorted([uuids.agg4])),
            ','.join(sorted(
                reqspec.requested_destination.aggregates[1].split(','))))
        getmd.assert_has_calls([
            mock.call(self.context, value='owner'),
            mock.call(self.context,
                      key='availability_zone',
                      value='myaz')])
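
The assertions above pin down the destination format the filters produce. The snippet below is a standalone illustration with placeholder UUIDs, not Nova code: requested_destination.aggregates ends up as a list with one comma-joined string of candidate aggregate UUIDs per filter, presumably read as "the chosen host must belong to at least one aggregate named in each entry".

# Standalone illustration of the format checked above (placeholder UUIDs,
# not real Nova objects): each request filter appends one entry to
# RequestSpec.requested_destination.aggregates, and each entry is a
# comma-joined string of candidate aggregate UUIDs.
requested_aggregates = [
    'agg1-uuid,agg2-uuid',  # candidates added by the tenant filter
    'agg4-uuid',            # candidates added by the availability-zone filter
]

# Recovering the per-filter candidate sets, as the test's assertions do:
candidate_sets = [set(entry.split(',')) for entry in requested_aggregates]
assert candidate_sets[0] == {'agg1-uuid', 'agg2-uuid'}
assert candidate_sets[1] == {'agg4-uuid'}
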
Example 2
    def test_process_reqspec(self):
        fake_filters = [mock.MagicMock(), mock.MagicMock()]
        with mock.patch('nova.scheduler.request_filter.ALL_REQUEST_FILTERS',
                        new=fake_filters):
            request_filter.process_reqspec(mock.sentinel.context,
                                           mock.sentinel.reqspec)
        for filter in fake_filters:
            filter.assert_called_once_with(mock.sentinel.context,
                                           mock.sentinel.reqspec)
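
This test only checks dispatch: every callable in ALL_REQUEST_FILTERS is applied to the same (context, request_spec) pair. Below is a minimal sketch consistent with that behaviour; example_filter and its body are placeholders, and real filters mutate the RequestSpec in place or raise exception.RequestFilterFailed.

# Minimal sketch of the dispatch the test pins down; example_filter is a
# hypothetical placeholder, not one of Nova's real filters.

def example_filter(ctxt, request_spec):
    """Inspect or modify the RequestSpec, or raise RequestFilterFailed."""
    pass


ALL_REQUEST_FILTERS = [
    example_filter,
    # ... additional filter callables
]


def process_reqspec(ctxt, request_spec):
    """Apply every registered request filter to the given RequestSpec."""
    for filter_fn in ALL_REQUEST_FILTERS:
        filter_fn(ctxt, request_spec)
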
Example 3
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None,
                            return_objects=False,
                            return_alternates=False):
        """Returns destinations(s) best suited for this RequestSpec.

        Starting in Queens, this method returns a list of lists of Selection
        objects, with one list for each requested instance. Each instance's
        list will have its first element be the Selection object representing
        the chosen host for the instance, and if return_alternates is True,
        zero or more alternate objects that could also satisfy the request. The
        number of alternates is determined by the configuration option
        `CONF.scheduler.max_attempts`.

        The ability of a calling method to handle this format of returned
        destinations is indicated by a True value in the parameter
        `return_objects`. However, there may still be some older conductors in
        a deployment that have not been updated to Queens, and in that case
        return_objects will be False, and the result will be a list of dicts
        with 'host', 'nodename' and 'limits' as keys. When return_objects is
        False, the value of return_alternates has no effect. The reason there
        are two kwarg parameters return_objects and return_alternates is so we
        can differentiate between callers that understand the Selection object
        format but *don't* want to get alternate hosts, as is the case with the
        conductors that handle certain move operations.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)

        try:
            request_filter.process_reqspec(ctxt, spec_obj)
        except exception.RequestFilterFailed as e:
            raise exception.NoValidHost(reason=e.message)

        resources = utils.resources_from_request_spec(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
            res = self.placement_client.get_allocation_candidates(
                ctxt, resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries, allocation_request_version = (
                    None, None, None)
            else:
                (alloc_reqs, provider_summaries,
                 allocation_request_version) = res
            if not alloc_reqs:
                LOG.debug("Got no allocation candidates from the Placement "
                          "API. This may be a temporary occurrence as compute "
                          "nodes start up and begin reporting inventory to "
                          "the Placement service.")
                raise exception.NoValidHost(reason="")
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rp_uuid in ar['allocations']:
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        # Only return alternates if both return_objects and return_alternates
        # are True.
        return_alternates = return_alternates and return_objects
        selections = self.driver.select_destinations(
            ctxt, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
            provider_summaries, allocation_request_version, return_alternates)
        # If `return_objects` is False, we need to convert the selections to
        # the older format, which is a list of host state dicts.
        if not return_objects:
            selection_dicts = [sel[0].to_dict() for sel in selections]
            return jsonutils.to_primitive(selection_dicts)
        return selections
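
For context on the two result formats the docstring describes, here is a hedged caller-side sketch (pick_chosen is an illustrative helper, not a Nova API): with return_objects=True the result is one list of Selection objects per requested instance, chosen host first and alternates after; with return_objects=False it is a flat list of dicts keyed by 'host', 'nodename' and 'limits', one per instance and with no alternates.

# Illustrative consumer of the two result formats described in the
# docstring above; pick_chosen() is a hypothetical helper, not Nova code.

def pick_chosen(result, return_objects=True):
    """Return the chosen destination for each requested instance."""
    if return_objects:
        # Queens+ format: a list per instance; element 0 is the chosen
        # Selection object, any further elements are alternates.
        return [per_instance[0] for per_instance in result]
    # Legacy format: a flat list of dicts with 'host', 'nodename' and
    # 'limits' keys, one per instance, never any alternates.
    return [host_dict['host'] for host_dict in result]
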
Example 4
    def test_with_tenant_and_az_and_traits(self, mock_getmd, mock_getnotmd):
        mock_getmd.side_effect = [
            # Tenant filter
            [
                objects.Aggregate(uuid=uuids.agg1,
                                  metadata={'filter_tenant_id': 'owner'}),
                objects.Aggregate(uuid=uuids.agg2,
                                  metadata={'filter_tenant_id:12': 'owner'}),
                objects.Aggregate(uuid=uuids.agg3,
                                  metadata={'other_key': 'owner'})
            ],
            # AZ filter
            [
                objects.Aggregate(uuid=uuids.agg4,
                                  metadata={'availability_zone': 'myaz'})
            ],
        ]

        mock_getnotmd.side_effect = [
            # isolate_aggregates filter
            [
                objects.Aggregate(uuid=uuids.agg1,
                                  metadata={
                                      'trait:CUSTOM_WINDOWS_LICENSED_TRAIT':
                                      'required'
                                  }),
                objects.Aggregate(uuid=uuids.agg2,
                                  metadata={
                                      'trait:CUSTOM_WINDOWS_LICENSED_TRAIT':
                                      'required',
                                      'trait:CUSTOM_XYZ_TRAIT': 'required'
                                  }),
                objects.Aggregate(
                    uuid=uuids.agg3,
                    metadata={'trait:CUSTOM_XYZ_TRAIT': 'required'}),
            ],
        ]

        traits = set(['HW_GPU_API_DXVA', 'HW_NIC_DCB_ETS'])
        fake_flavor = objects.Flavor(
            vcpus=1,
            memory_mb=1024,
            root_gb=10,
            ephemeral_gb=5,
            swap=0,
            extra_specs={'trait:' + trait: 'required'
                         for trait in traits})
        fake_image = objects.ImageMeta(properties=objects.ImageMetaProps(
            traits_required=[]))
        reqspec = objects.RequestSpec(project_id='owner',
                                      availability_zone='myaz',
                                      flavor=fake_flavor,
                                      image=fake_image)
        request_filter.process_reqspec(self.context, reqspec)
        self.assertEqual(
            ','.join(sorted([uuids.agg1, uuids.agg2])), ','.join(
                sorted(
                    reqspec.requested_destination.aggregates[0].split(','))))
        self.assertEqual(
            ','.join(sorted([uuids.agg4])), ','.join(
                sorted(
                    reqspec.requested_destination.aggregates[1].split(','))))
        self.assertItemsEqual(
            set([uuids.agg1, uuids.agg2, uuids.agg3]),
            reqspec.requested_destination.forbidden_aggregates)
        mock_getmd.assert_has_calls([
            mock.call(self.context, value='owner'),
            mock.call(self.context, key='availability_zone', value='myaz')
        ])

        keys = ['trait:%s' % trait for trait in traits]
        mock_getnotmd.assert_called_once_with(self.context,
                                              utils.ItemsMatcher(keys),
                                              'trait:',
                                              value='required')
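
The keys checked in the final assertion come straight from the flavor: every extra spec of the form 'trait:<NAME>': 'required' marks <NAME> as a required trait, and the mocked aggregate lookup is expected to return aggregates whose metadata does not carry those trait keys. A small self-contained sketch of that key derivation follows; required_trait_keys() is an illustrative helper, not part of Nova.

# Self-contained sketch of how the 'trait:...' keys in the assertion are
# derived from flavor extra_specs; required_trait_keys() is an
# illustrative helper, not Nova code.

def required_trait_keys(extra_specs):
    """Return the extra-spec keys naming required traits."""
    return sorted(
        key for key, value in extra_specs.items()
        if key.startswith('trait:') and value == 'required')


extra_specs = {
    'trait:HW_GPU_API_DXVA': 'required',
    'trait:HW_NIC_DCB_ETS': 'required',
    'hw:cpu_policy': 'shared',  # unrelated spec, ignored
}
assert required_trait_keys(extra_specs) == [
    'trait:HW_GPU_API_DXVA', 'trait:HW_NIC_DCB_ETS']
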
Example 5
    def select_destinations(self, ctxt, request_spec=None,
            filter_properties=None, spec_obj=_sentinel, instance_uuids=None,
            return_objects=False, return_alternates=False):
        """Returns destinations(s) best suited for this RequestSpec.

        Starting in Queens, this method returns a list of lists of Selection
        objects, with one list for each requested instance. Each instance's
        list will have its first element be the Selection object representing
        the chosen host for the instance, and if return_alternates is True,
        zero or more alternate objects that could also satisfy the request. The
        number of alternates is determined by the configuration option
        `CONF.scheduler.max_attempts`.

        The ability of a calling method to handle this format of returned
        destinations is indicated by a True value in the parameter
        `return_objects`. However, there may still be some older conductors in
        a deployment that have not been updated to Queens, and in that case
        return_objects will be False, and the result will be a list of dicts
        with 'host', 'nodename' and 'limits' as keys. When return_objects is
        False, the value of return_alternates has no effect. The reason there
        are two kwarg parameters return_objects and return_alternates is so we
        can differentiate between callers that understand the Selection object
        format but *don't* want to get alternate hosts, as is the case with the
        conductors that handle certain move operations.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(ctxt,
                                                           request_spec,
                                                           filter_properties)

        is_rebuild = utils.request_is_rebuild(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if self.driver.USES_ALLOCATION_CANDIDATES and not is_rebuild:
            # Only process the Placement request spec filters when Placement
            # is used.
            try:
                request_filter.process_reqspec(ctxt, spec_obj)
            except exception.RequestFilterFailed as e:
                raise exception.NoValidHost(reason=e.message)

            resources = utils.resources_from_request_spec(
                ctxt, spec_obj, self.driver.host_manager)
            res = self.placement_client.get_allocation_candidates(ctxt,
                                                                  resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                alloc_reqs, provider_summaries, allocation_request_version = (
                        None, None, None)
            else:
                (alloc_reqs, provider_summaries,
                            allocation_request_version) = res
            if not alloc_reqs:
                LOG.info("Got no allocation candidates from the Placement "
                         "API. This could be due to insufficient resources "
                         "or a temporary occurrence as compute nodes start "
                         "up.")
                raise exception.NoValidHost(reason="")
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rp_uuid in ar['allocations']:
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        # Only return alternates if both return_objects and return_alternates
        # are True.
        return_alternates = return_alternates and return_objects
        selections = self.driver.select_destinations(ctxt, spec_obj,
                instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries,
                allocation_request_version, return_alternates)
        # If `return_objects` is False, we need to convert the selections to
        # the older format, which is a list of host state dicts.
        if not return_objects:
            selection_dicts = [sel[0].to_dict() for sel in selections]
            return jsonutils.to_primitive(selection_dicts)
        return selections
Example 6
    def select_destinations(self,
                            ctxt,
                            request_spec=None,
                            filter_properties=None,
                            spec_obj=_sentinel,
                            instance_uuids=None,
                            return_objects=False,
                            return_alternates=False):
        """Returns destinations(s) best suited for this RequestSpec.

        Starting in Queens, this method returns a list of lists of Selection
        objects, with one list for each requested instance. Each instance's
        list will have its first element be the Selection object representing
        the chosen host for the instance, and if return_alternates is True,
        zero or more alternate objects that could also satisfy the request. The
        number of alternates is determined by the configuration option
        `CONF.scheduler.max_attempts`.

        The ability of a calling method to handle this format of returned
        destinations is indicated by a True value in the parameter
        `return_objects`. However, there may still be some older conductors in
        a deployment that have not been updated to Queens, and in that case
        return_objects will be False, and the result will be a list of dicts
        with 'host', 'nodename' and 'limits' as keys. When return_objects is
        False, the value of return_alternates has no effect. The reason there
        are two kwarg parameters return_objects and return_alternates is so we
        can differentiate between callers that understand the Selection object
        format but *don't* want to get alternate hosts, as is the case with the
        conductors that handle certain move operations.
        """
        LOG.debug("Starting to schedule for instances: %s", instance_uuids)

        # TODO(sbauza): Change the method signature to only accept a spec_obj
        # argument once API v5 is provided.
        if spec_obj is self._sentinel:
            spec_obj = objects.RequestSpec.from_primitives(
                ctxt, request_spec, filter_properties)

        is_rebuild = utils.request_is_rebuild(spec_obj)
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if not is_rebuild:
            try:
                request_filter.process_reqspec(ctxt, spec_obj)
            except exception.RequestFilterFailed as e:
                raise exception.NoValidHost(reason=e.message)

            resources = utils.resources_from_request_spec(
                ctxt,
                spec_obj,
                self.driver.host_manager,
                enable_pinning_translate=True)
            res = self.placement_client.get_allocation_candidates(
                ctxt, resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
                # get_allocation_candidates returns None.
                res = None, None, None

            alloc_reqs, provider_summaries, allocation_request_version = res
            alloc_reqs = alloc_reqs or []
            provider_summaries = provider_summaries or {}

            # if the user requested pinned CPUs, we make a second query to
            # placement for allocation candidates using VCPUs instead of PCPUs.
            # This is necessary because users might not have modified all (or
            # any) of their compute nodes meaning said compute nodes will not
            # be reporting PCPUs yet. This is okay to do because the
            # NUMATopologyFilter (scheduler) or virt driver (compute node) will
            # weed out hosts that are actually using new style configuration
            # but simply don't have enough free PCPUs (or any PCPUs).
            # TODO(stephenfin): Remove when we drop support for 'vcpu_pin_set'
            if (resources.cpu_pinning_requested
                    and not CONF.workarounds.disable_fallback_pcpu_query):
                LOG.debug('Requesting fallback allocation candidates with '
                          'VCPU instead of PCPU')
                resources = utils.resources_from_request_spec(
                    ctxt,
                    spec_obj,
                    self.driver.host_manager,
                    enable_pinning_translate=False)
                res = self.placement_client.get_allocation_candidates(
                    ctxt, resources)
                if res:
                    # merge the allocation requests and provider summaries from
                    # the two requests together
                    alloc_reqs_fallback, provider_summaries_fallback, _ = res

                    alloc_reqs.extend(alloc_reqs_fallback)
                    provider_summaries.update(provider_summaries_fallback)

            if not alloc_reqs:
                LOG.info("Got no allocation candidates from the Placement "
                         "API. This could be due to insufficient resources "
                         "or a temporary occurrence as compute nodes start "
                         "up.")
                raise exception.NoValidHost(reason="")
            else:
                # Build a dict of lists of allocation requests, keyed by
                # provider UUID, so that when we attempt to claim resources for
                # a host, we can grab an allocation request easily
                alloc_reqs_by_rp_uuid = collections.defaultdict(list)
                for ar in alloc_reqs:
                    for rp_uuid in ar['allocations']:
                        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        # Only return alternates if both return_objects and return_alternates
        # are True.
        return_alternates = return_alternates and return_objects
        selections = self.driver.select_destinations(
            ctxt, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
            provider_summaries, allocation_request_version, return_alternates)
        # If `return_objects` is False, we need to convert the selections to
        # the older format, which is a list of host state dicts.
        if not return_objects:
            selection_dicts = [sel[0].to_dict() for sel in selections]
            return jsonutils.to_primitive(selection_dicts)
        return selections
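
The dict-of-lists built near the end of each variant above is easier to see with concrete data. The snippet below is a standalone illustration with placeholder UUIDs and a simplified allocation-request payload: each allocation request names the resource providers it would consume from under its 'allocations' key, and indexing the requests by provider UUID lets the scheduler quickly grab a matching request when claiming resources against a chosen host.

import collections

# Placeholder allocation requests in the simplified shape the code above
# iterates over: 'allocations' maps resource-provider UUIDs to the
# resources requested from each provider.
alloc_reqs = [
    {'allocations': {'rp-uuid-1': {'resources': {'VCPU': 1}}}},
    {'allocations': {'rp-uuid-1': {'resources': {'VCPU': 2}},
                     'rp-uuid-2': {'resources': {'DISK_GB': 10}}}},
]

# Same grouping as the scheduler performs: every request is listed under
# each provider it touches.
alloc_reqs_by_rp_uuid = collections.defaultdict(list)
for ar in alloc_reqs:
    for rp_uuid in ar['allocations']:
        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

assert len(alloc_reqs_by_rp_uuid['rp-uuid-1']) == 2
assert len(alloc_reqs_by_rp_uuid['rp-uuid-2']) == 1
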