def _claim_resources(self, ctx, spec_obj, instance_uuid, alloc_reqs,
                     allocation_request_version=None):
    """Given an instance UUID (representing the consumer of resources) and
    a list of allocation_request JSON objects, attempt to claim resources
    for the instance in the placement API. Returns True if the claim
    process was successful, False otherwise.

    :param ctx: The RequestContext object
    :param spec_obj: The RequestSpec object
    :param instance_uuid: The UUID of the consuming instance
    :param alloc_reqs: A list of allocation_request JSON objects that
                       allocate against (at least) the compute host
                       selected by the _schedule() method. These
                       allocation_requests were constructed from a call to
                       the GET /allocation_candidates placement API. Each
                       allocation_request satisfies the original request
                       for resources and can be supplied as-is (along with
                       the project and user ID) to the placement API's
                       PUT /allocations/{consumer_uuid} call to claim
                       resources for the instance
    :param allocation_request_version: The microversion used to request
                                       the allocations.
    """
    if utils.request_is_rebuild(spec_obj):
        # NOTE(danms): This is a rebuild-only scheduling request, so we
        # should not be doing any extra claiming
        LOG.debug('Not claiming resources in the placement API for '
                  'rebuild-only scheduling of instance %(uuid)s',
                  {'uuid': instance_uuid})
        return True

    LOG.debug("Attempting to claim resources in the placement API for "
              "instance %s", instance_uuid)

    project_id = spec_obj.project_id

    # NOTE(jaypipes): The RequestSpec doesn't store the user_id, only the
    # project_id, so we need to grab the user information from the
    # context. Perhaps we should consider putting the user ID in the spec
    # object?
    user_id = ctx.user_id

    # TODO(jaypipes): Loop through all allocation_requests instead of just
    # trying the first one. For now, since we'll likely want to order the
    # allocation_requests in the future based on information in the
    # provider summaries, we'll just try to claim resources using the
    # first allocation_request
    alloc_req = alloc_reqs[0]

    return self.placement_client.claim_resources(
        instance_uuid, alloc_req, project_id, user_id,
        allocation_request_version=allocation_request_version)
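# NOTE: Illustrative sketch only, not the actual driver code. It shows how
# a caller might drive _claim_resources() while walking an ordered list of
# candidate hosts; the helper name, 'ordered_hosts', and the retry loop
# are assumptions made for the example.
def _claim_first_available(self, ctx, spec_obj, instance_uuid,
                           ordered_hosts, alloc_reqs_by_rp_uuid,
                           allocation_request_version):
    """Try each candidate host in order until a resource claim succeeds."""
    for host_state in ordered_hosts:
        # Any allocation request that includes this host's provider UUID
        # can be used to claim against it.
        alloc_reqs = alloc_reqs_by_rp_uuid[host_state.uuid]
        if self._claim_resources(ctx, spec_obj, instance_uuid, alloc_reqs,
                                 allocation_request_version):
            return host_state  # claim succeeded; schedule to this host
    return None  # all claims failed, e.g. lost races with other schedulers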
def _filter_one(self, obj, spec):
    """Return True if the object passes the filter, otherwise False."""
    # Import here so we pick up nova.scheduler.utils and not
    # scheduler.filters.utils
    from nova.scheduler import utils
    if not self.RUN_ON_REBUILD and utils.request_is_rebuild(spec):
        # If we don't filter, default to passing the host.
        return True
    else:
        # We are either a rebuild filter, in which case we always run,
        # or this request is not a rebuild, in which case all filters
        # should run.
        return self.host_passes(obj, spec)
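# NOTE: Illustrative subclass showing how RUN_ON_REBUILD interacts with
# _filter_one(). The class name and the attributes used (free_ram_mb,
# memory_mb) mirror Nova's RAM filter, but this is a sketch, not the real
# filter.
class ExampleRamFilter(BaseHostFilter):
    # Rebuilds stay on the same host, so re-checking RAM is pointless;
    # _filter_one() will auto-pass this filter for rebuild requests.
    RUN_ON_REBUILD = False

    def host_passes(self, host_state, spec_obj):
        # Pass hosts with enough free RAM for the requested flavor.
        return host_state.free_ram_mb >= spec_obj.memory_mb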
def select_destinations(self, ctxt, request_spec=None,
                        filter_properties=None, spec_obj=_sentinel,
                        instance_uuids=None, return_objects=False,
                        return_alternates=False):
    """Returns destination(s) best suited for this RequestSpec.

    Starting in Queens, this method returns a list of lists of Selection
    objects, with one list for each requested instance. Each instance's
    list will have its first element be the Selection object representing
    the chosen host for the instance, and, if return_alternates is True,
    zero or more alternate objects that could also satisfy the request.
    The number of alternates is determined by the configuration option
    `CONF.scheduler.max_attempts`.

    The ability of a calling method to handle this format of returned
    destinations is indicated by a True value in the parameter
    `return_objects`. However, there may still be some older conductors in
    a deployment that have not been updated to Queens, and in that case
    return_objects will be False, and the result will be a list of dicts
    with 'host', 'nodename' and 'limits' as keys. When return_objects is
    False, the value of return_alternates has no effect.

    The reason there are two kwarg parameters return_objects and
    return_alternates is so we can differentiate between callers that
    understand the Selection object format but *don't* want to get
    alternate hosts, as is the case with the conductors that handle
    certain move operations.
    """
    LOG.debug("Starting to schedule for instances: %s", instance_uuids)

    # TODO(sbauza): Change the method signature to only accept a spec_obj
    # argument once API v5 is provided.
    if spec_obj is self._sentinel:
        spec_obj = objects.RequestSpec.from_primitives(
            ctxt, request_spec, filter_properties)

    is_rebuild = utils.request_is_rebuild(spec_obj)
    alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version = (
        None, None, None)
    if self.driver.USES_ALLOCATION_CANDIDATES and not is_rebuild:
        # Only process the Placement request spec filters when Placement
        # is used.
        try:
            request_filter.process_reqspec(ctxt, spec_obj)
        except exception.RequestFilterFailed as e:
            raise exception.NoValidHost(reason=e.message)

        resources = utils.resources_from_request_spec(
            ctxt, spec_obj, self.driver.host_manager)
        res = self.placement_client.get_allocation_candidates(
            ctxt, resources)
        if res is None:
            # We have to handle the case that we failed to connect to the
            # Placement service and the safe_connect decorator on
            # get_allocation_candidates returns None.
            alloc_reqs, provider_summaries, allocation_request_version = (
                None, None, None)
        else:
            (alloc_reqs, provider_summaries,
             allocation_request_version) = res
        if not alloc_reqs:
            LOG.info("Got no allocation candidates from the Placement "
                     "API. This could be due to insufficient resources "
                     "or a temporary occurrence as compute nodes start "
                     "up.")
            raise exception.NoValidHost(reason="")
        else:
            # Build a dict of lists of allocation requests, keyed by
            # provider UUID, so that when we attempt to claim resources
            # for a host, we can grab an allocation request easily
            alloc_reqs_by_rp_uuid = collections.defaultdict(list)
            for ar in alloc_reqs:
                for rp_uuid in ar['allocations']:
                    alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

    # Only return alternates if both return_objects and return_alternates
    # are True.
    return_alternates = return_alternates and return_objects
    selections = self.driver.select_destinations(
        ctxt, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
        provider_summaries, allocation_request_version, return_alternates)

    # If `return_objects` is False, we need to convert the selections to
    # the older format, which is a list of host state dicts.
    if not return_objects:
        selection_dicts = [sel[0].to_dict() for sel in selections]
        return jsonutils.to_primitive(selection_dicts)
    return selections
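# NOTE: A small, self-contained illustration of the provider-UUID grouping
# performed above. The two allocation requests and their UUID keys are
# invented for the example.
import collections

alloc_reqs = [
    {'allocations': {'rp-compute-1': {'resources': {'VCPU': 2}}}},
    {'allocations': {'rp-compute-2': {'resources': {'VCPU': 2}},
                     'rp-shared-disk': {'resources': {'DISK_GB': 20}}}},
]

alloc_reqs_by_rp_uuid = collections.defaultdict(list)
for ar in alloc_reqs:
    for rp_uuid in ar['allocations']:
        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

# Result:
#   'rp-compute-1'   -> [first request]
#   'rp-compute-2'   -> [second request]
#   'rp-shared-disk' -> [second request]
# A request that spans several providers is listed under each of them, so
# the claim step can look up candidates directly by the chosen host's UUID.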
def select_destinations(self, ctxt, request_spec=None,
                        filter_properties=None, spec_obj=_sentinel,
                        instance_uuids=None, return_objects=False,
                        return_alternates=False):
    """Returns destination(s) best suited for this RequestSpec.

    Starting in Queens, this method returns a list of lists of Selection
    objects, with one list for each requested instance. Each instance's
    list will have its first element be the Selection object representing
    the chosen host for the instance, and, if return_alternates is True,
    zero or more alternate objects that could also satisfy the request.
    The number of alternates is determined by the configuration option
    `CONF.scheduler.max_attempts`.

    The ability of a calling method to handle this format of returned
    destinations is indicated by a True value in the parameter
    `return_objects`. However, there may still be some older conductors in
    a deployment that have not been updated to Queens, and in that case
    return_objects will be False, and the result will be a list of dicts
    with 'host', 'nodename' and 'limits' as keys. When return_objects is
    False, the value of return_alternates has no effect.

    The reason there are two kwarg parameters return_objects and
    return_alternates is so we can differentiate between callers that
    understand the Selection object format but *don't* want to get
    alternate hosts, as is the case with the conductors that handle
    certain move operations.
    """
    LOG.debug("Starting to schedule for instances: %s", instance_uuids)

    # TODO(sbauza): Change the method signature to only accept a spec_obj
    # argument once API v5 is provided.
    if spec_obj is self._sentinel:
        spec_obj = objects.RequestSpec.from_primitives(
            ctxt, request_spec, filter_properties)

    is_rebuild = utils.request_is_rebuild(spec_obj)
    alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version = (
        None, None, None)
    if not is_rebuild:
        try:
            request_filter.process_reqspec(ctxt, spec_obj)
        except exception.RequestFilterFailed as e:
            raise exception.NoValidHost(reason=e.message)

        resources = utils.resources_from_request_spec(
            ctxt, spec_obj, self.driver.host_manager,
            enable_pinning_translate=True)
        res = self.placement_client.get_allocation_candidates(
            ctxt, resources)
        if res is None:
            # We have to handle the case that we failed to connect to the
            # Placement service and the safe_connect decorator on
            # get_allocation_candidates returns None.
            res = None, None, None
        alloc_reqs, provider_summaries, allocation_request_version = res
        alloc_reqs = alloc_reqs or []
        provider_summaries = provider_summaries or {}

        # If the user requested pinned CPUs, we make a second query to
        # placement for allocation candidates using VCPUs instead of
        # PCPUs. This is necessary because users might not have modified
        # all (or any) of their compute nodes, meaning said compute nodes
        # will not be reporting PCPUs yet. This is okay to do because the
        # NUMATopologyFilter (scheduler) or virt driver (compute node)
        # will weed out hosts that are actually using new style
        # configuration but simply don't have enough free PCPUs (or any
        # PCPUs).
        # TODO(stephenfin): Remove when we drop support for 'vcpu_pin_set'
        if (resources.cpu_pinning_requested and
                not CONF.workarounds.disable_fallback_pcpu_query):
            LOG.debug('Requesting fallback allocation candidates with '
                      'VCPU instead of PCPU')
            resources = utils.resources_from_request_spec(
                ctxt, spec_obj, self.driver.host_manager,
                enable_pinning_translate=False)
            res = self.placement_client.get_allocation_candidates(
                ctxt, resources)
            if res:
                # Merge the allocation requests and provider summaries
                # from the two requests together
                alloc_reqs_fallback, provider_summaries_fallback, _ = res
                alloc_reqs.extend(alloc_reqs_fallback)
                provider_summaries.update(provider_summaries_fallback)

        if not alloc_reqs:
            LOG.info("Got no allocation candidates from the Placement "
                     "API. This could be due to insufficient resources "
                     "or a temporary occurrence as compute nodes start "
                     "up.")
            raise exception.NoValidHost(reason="")
        else:
            # Build a dict of lists of allocation requests, keyed by
            # provider UUID, so that when we attempt to claim resources
            # for a host, we can grab an allocation request easily
            alloc_reqs_by_rp_uuid = collections.defaultdict(list)
            for ar in alloc_reqs:
                for rp_uuid in ar['allocations']:
                    alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

    # Only return alternates if both return_objects and return_alternates
    # are True.
    return_alternates = return_alternates and return_objects
    selections = self.driver.select_destinations(
        ctxt, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
        provider_summaries, allocation_request_version, return_alternates)

    # If `return_objects` is False, we need to convert the selections to
    # the older format, which is a list of host state dicts.
    if not return_objects:
        selection_dicts = [sel[0].to_dict() for sel in selections]
        return jsonutils.to_primitive(selection_dicts)
    return selections
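# NOTE: Hedged illustration of the legacy (return_objects=False) result
# shape described in the docstring above: a flat list with one dict per
# requested instance, keyed by 'host', 'nodename' and 'limits'. The host
# names and limit values here are invented.
legacy_result = [
    {'host': 'compute-01', 'nodename': 'compute-01.example.org',
     'limits': {'memory_mb': 16384, 'vcpu': 8.0}},
]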