Code example #1
File: chance_scheduler.py  Project: zwphit/zun
    def _schedule(self, context):
        """Picks a host that is up at random."""
        hosts = self.hosts_up(context)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        return random.choice(hosts)
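
The chance scheduler above reduces to a single decision: if no compute services are up, raise NoValidHost; otherwise pick one uniformly at random. Below is a minimal, self-contained sketch of that pattern (the NoValidHost class is a stand-in for zun.common.exception.NoValidHost, and the plain host list replaces hosts_up()):

import random

class NoValidHost(Exception):
    """Stand-in for zun's NoValidHost exception."""
    def __init__(self, reason=""):
        super().__init__(reason)
        self.reason = reason

def pick_random_host(hosts):
    # Mirrors _schedule(): no live hosts means scheduling cannot proceed.
    if not hosts:
        raise NoValidHost(reason="Is the appropriate service running?")
    return random.choice(hosts)

# pick_random_host(["node-1", "node-2"])  -> "node-1" or "node-2"
# pick_random_host([])                    -> raises NoValidHost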
Code example #2
    def test_schedule_container_exception(self, mock_save,
                                          mock_schedule_container):
        # A NoValidHost from the scheduler should leave the container in the
        # ERROR state instead of letting the exception escape
        # container_create().
        container = self.container
        container.status = consts.CREATING
        mock_schedule_container.side_effect = exception.NoValidHost(
            reason='not enough host')
        self.compute_api.container_create(self.context, container, None, None,
                                          None, False)
        self.assertTrue(mock_schedule_container.called)
        self.assertTrue(mock_save.called)
        self.assertEqual(consts.ERROR, container.status)
Code example #3
File: chance_scheduler.py  Project: zwphit/zun
    def select_destinations(self, context, containers, extra_spec):
        """Selects random destinations."""
        dests = []
        for container in containers:
            host = self._schedule(context)
            host_state = dict(host=host, nodename=None, limits=None)
            dests.append(host_state)

        if len(dests) < 1:
            reason = _('There are not enough hosts available.')
            raise exception.NoValidHost(reason=reason)

        return dests
Code example #4
    def _schedule(self, context, container, extra_spec):
        """Picks a host according to filters."""
        hosts = self.hosts_up(context)
        nodes = objects.ComputeNode.list(context)
        nodes = [node for node in nodes if node.hostname in hosts]
        host_states = self.get_all_host_state(nodes)
        hosts = self.filter_handler.get_filtered_objects(
            self.enabled_filters, host_states, container, extra_spec)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        return random.choice(hosts)
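
Example #4 adds a filtering pass before the random choice: compute nodes are reduced to those whose host service is up, turned into host states, and run through the enabled filters. The handler call itself is zun's FilterHandler; the sketch below only illustrates the general shape of such a pipeline with a hypothetical host_passes() predicate, not the real filter API:

def filter_hosts(host_states, enabled_filters, container, extra_spec):
    """Drop any host state that fails at least one enabled filter."""
    candidates = list(host_states)
    for flt in enabled_filters:
        # host_passes() is a hypothetical per-filter predicate.
        candidates = [hs for hs in candidates
                      if flt.host_passes(hs, container, extra_spec)]
    return candidates

# If the result is empty, the caller raises NoValidHost exactly as in
# example #1; otherwise random.choice() picks among the survivors.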
Code example #5
    def select_destinations(self, context, containers, extra_specs):
        LOG.debug("Starting to schedule for containers: %s",
                  [c.uuid for c in containers])

        if not self.traits_ensured:
            self.placement_client._ensure_traits(context, consts.CUSTOM_TRAITS)
            self.traits_ensured = True

        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        request_filter.process_reqspec(context, extra_specs)
        resources = utils.resources_from_request_spec(context, containers[0],
                                                      extra_specs)

        try:
            res = self.placement_client.get_allocation_candidates(
                context, resources)
            (alloc_reqs, provider_summaries, allocation_request_version) = res
        except (ks_exc.EndpointNotFound, ks_exc.MissingAuthPlugin,
                ks_exc.Unauthorized, ks_exc.DiscoveryFailure,
                ks_exc.ConnectFailure):
            # We have to handle the case that we failed to connect to the
            # Placement service.
            alloc_reqs, provider_summaries, allocation_request_version = (None,
                                                                          None,
                                                                          None)
        if not alloc_reqs:
            LOG.info("Got no allocation candidates from the Placement "
                     "API. This could be due to insufficient resources "
                     "or a temporary occurrence as compute nodes start "
                     "up.")
            raise exception.NoValidHost(reason="")
        else:
            # Build a dict of lists of allocation requests, keyed by
            # provider UUID, so that when we attempt to claim resources for
            # a host, we can grab an allocation request easily
            alloc_reqs_by_rp_uuid = collections.defaultdict(list)
            for ar in alloc_reqs:
                for rp_uuid in ar['allocations']:
                    alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

        selections = self.driver.select_destinations(
            context, containers, extra_specs, alloc_reqs_by_rp_uuid,
            provider_summaries, allocation_request_version)
        return selections
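
The defaultdict built near the end of example #5 indexes every allocation request under each resource provider UUID it references, so the later per-host claim loop can look up candidates directly. A hedged, standalone illustration with made-up allocation requests follows (only the 'allocations' keying matters here; the real structures carry resource amounts and more):

import collections

alloc_reqs = [
    {"allocations": {"rp-uuid-1": {"resources": {"VCPU": 1}}}},
    {"allocations": {"rp-uuid-2": {"resources": {"VCPU": 1}}}},
    {"allocations": {"rp-uuid-1": {"resources": {"VCPU": 2}}}},
]

alloc_reqs_by_rp_uuid = collections.defaultdict(list)
for ar in alloc_reqs:
    for rp_uuid in ar["allocations"]:
        alloc_reqs_by_rp_uuid[rp_uuid].append(ar)

# alloc_reqs_by_rp_uuid["rp-uuid-1"] -> two candidate requests
# alloc_reqs_by_rp_uuid["rp-uuid-2"] -> one candidate request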
Code example #6
File: filter_scheduler.py  Project: wanghuiict/zun
    def select_destinations(self,
                            context,
                            containers,
                            extra_specs,
                            alloc_reqs_by_rp_uuid,
                            provider_summaries,
                            allocation_request_version=None):
        """Selects destinations by filters."""
        dests = []
        for container in containers:
            host = self._schedule(context, container, extra_specs,
                                  alloc_reqs_by_rp_uuid, provider_summaries,
                                  allocation_request_version)
            host_state = dict(host=host.hostname,
                              nodename=None,
                              limits=host.limits)
            dests.append(host_state)

        if len(dests) < 1:
            reason = _('There are not enough hosts available.')
            raise exception.NoValidHost(reason=reason)

        return dests
Code example #7
File: filter_scheduler.py  Project: wanghuiict/zun
    def _schedule(self,
                  context,
                  container,
                  extra_specs,
                  alloc_reqs_by_rp_uuid,
                  provider_summaries,
                  allocation_request_version=None):
        """Picks a host according to filters."""
        elevated = context.elevated()

        # NOTE(jaypipes): provider_summaries being None is treated differently
        # from an empty dict. provider_summaries is None when we want to grab
        # all compute nodes.
        # The provider_summaries variable will be an empty dict when the
        # Placement API found no providers that match the requested
        # constraints, which in turn makes compute_uuids an empty list and
        # objects.ComputeNode.list will return an empty list
        # also, which will eventually result in a NoValidHost error.
        compute_uuids = None
        if provider_summaries is not None:
            compute_uuids = list(provider_summaries.keys())
        if compute_uuids is None:
            nodes = objects.ComputeNode.list(context)
        else:
            nodes = objects.ComputeNode.list(
                context, filters={'rp_uuid': compute_uuids})

        services = self._get_services_by_host(context)
        hosts = services.keys()
        nodes = [node for node in nodes if node.hostname in hosts]
        host_states = self.get_all_host_state(nodes, services)
        hosts = self._get_filtered_hosts(host_states, container, extra_specs)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        # Attempt to claim the resources against one or more resource
        # providers, looping over the sorted list of possible hosts
        # looking for an allocation_request that contains that host's
        # resource provider UUID
        claimed_host = None
        for host in hosts:
            cn_uuid = host.uuid
            if cn_uuid not in alloc_reqs_by_rp_uuid:
                msg = ("A host state with uuid = '%s' that did not have a "
                       "matching allocation_request was encountered while "
                       "scheduling. This host was skipped.")
                LOG.debug(msg, cn_uuid)
                continue

            alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid]
            # TODO(jaypipes): Loop through all allocation_requests instead
            # of just trying the first one. For now, since we'll likely
            # want to order the allocation_requests in the future based on
            # information in the provider summaries, we'll just try to
            # claim resources using the first allocation_request
            alloc_req = alloc_reqs[0]
            if utils.claim_resources(
                    elevated,
                    self.placement_client,
                    container,
                    alloc_req,
                    allocation_request_version=allocation_request_version):
                claimed_host = host
                break

        if claimed_host is None:
            # We weren't able to claim resources in the placement API
            # for any of the sorted hosts identified. So, clean up any
            # successfully-claimed resources for prior containers in
            # this request and return an empty list which will cause
            # select_destinations() to raise NoValidHost
            msg = _("Unable to successfully claim against any host.")
            raise exception.NoValidHost(reason=msg)

        # Now consume the resources so the filter/weights will change for
        # the next container.
        self._consume_selected_host(claimed_host, container)

        return claimed_host
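
Stripped of the placement plumbing, the claim loop in example #7 is: walk the filtered hosts in order, skip any host whose resource provider has no matching allocation request, and stop at the first successful claim. A simplified skeleton of that control flow (claim_fn is a hypothetical callable standing in for utils.claim_resources):

def claim_first_available(hosts, alloc_reqs_by_rp_uuid, claim_fn):
    """Return the first host whose allocation request could be claimed."""
    for host in hosts:
        reqs = alloc_reqs_by_rp_uuid.get(host.uuid)
        if not reqs:
            # No allocation request references this host's provider; skip it.
            continue
        if claim_fn(host, reqs[0]):  # only the first request is attempted
            return host
    return None  # the caller turns None into NoValidHost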
Code example #8
File: utils.py  Project: wanghuiict/zun
def resources_from_request_spec(ctxt, container_obj, extra_specs):
    """Given a Container object, returns a ResourceRequest of the resources,
    traits, and aggregates it represents.
    :param ctxt: The request context.
    :param container_obj: A Container object.
    :return: A ResourceRequest object.
    :raises NoValidHost: If the specified host/node is not found in the DB.
    """
    cpu = container_obj.cpu if container_obj.cpu else CONF.default_cpu
    # NOTE(hongbin): Container is allowed to take partial core (i.e. 0.1)
    # but placement doesn't support it. Therefore, we take the ceil of
    # the number.
    cpu = int(math.ceil(cpu))
    # NOTE(hongbin): If cpu is 0, claim 1 core in placement because placement
    # doesn't support cpu as 0.
    cpu = cpu if cpu > 1 else 1
    memory = int(container_obj.memory) if container_obj.memory else \
        CONF.default_memory
    # NOTE(hongbin): If memory is 0, claim 1 MB in placement because placement
    # doesn't support memory as 0.
    memory = memory if memory > 1 else 1

    container_resources = {
        orc.VCPU: cpu,
        orc.MEMORY_MB: memory,
    }

    if container_obj.disk and container_obj.disk != 0:
        container_resources[orc.DISK_GB] = container_obj.disk

    # Process extra_specs
    if extra_specs:
        res_req = ResourceRequest.from_extra_specs(extra_specs)
        # If any of the three standard resources above was explicitly given in
        # the extra_specs - in any group - we need to replace it, or delete it
        # if it was given as zero.  We'll do this by grabbing a merged version
        # of the ResourceRequest resources and removing matching items from the
        # container_resources.
        container_resources = {
            rclass: amt
            for rclass, amt in container_resources.items()
            if rclass not in res_req.merged_resources()
        }
        # Now we don't need (or want) any remaining zero entries - remove them.
        res_req.strip_zeros()

        numbered_groups = res_req.get_num_of_numbered_groups()
    else:
        # Start with an empty one
        res_req = ResourceRequest()
        numbered_groups = 0

    # Add the (remaining) items from the container_resources to the
    # sharing group
    for rclass, amount in container_resources.items():
        res_req.get_request_group(None).resources[rclass] = amount

    requested_resources = extra_specs.get('requested_resources', [])
    for group in requested_resources:
        res_req.add_request_group(group)

    target_host = extra_specs.get('requested_host')
    if target_host:
        nodes = objects.ComputeNode.list(ctxt,
                                         filters={'hostname': target_host})
        if not nodes:
            reason = (_('No such host - host: %(host)s ') % {
                'host': target_host
            })
            raise exception.NoValidHost(reason=reason)
        if len(nodes) == 1:
            grp = res_req.get_request_group(None)
            grp.in_tree = nodes[0].rp_uuid
        else:
            # Multiple nodes are found when a target host is specified
            # without a specific node. Since placement doesn't support
            # multiple uuids in the `in_tree` queryparam, what we can do here
            # is to remove the limit from the `GET /a_c` query to prevent
            # the found nodes from being filtered out in placement.
            res_req._limit = None

    # Don't limit allocation candidates when using affinity/anti-affinity.
    if (extra_specs.get('hints')
            and any(key in ['group', 'same_host', 'different_host']
                    for key in extra_specs.get('hints'))):
        res_req._limit = None

    if res_req.get_num_of_numbered_groups() >= 2 and not res_req.group_policy:
        LOG.warning(
            "There is more than one numbered request group in the "
            "allocation candidate query but the container did not specify "
            "any group policy. This query would fail in placement due to "
            "the missing group policy. If you specified more than one "
            "numbered request group in the extra_spec then you need to "
            "specify the group policy in the extra_spec. If it is OK "
            "to let these groups be satisfied by overlapping resource "
            "providers then use 'group_policy': 'none'. If you want each "
            "group to be satisfied from a separate resource provider then "
            "use 'group_policy': 'isolate'.")

        if numbered_groups <= 1:
            LOG.info(
                "At least one numbered request group is defined outside of "
                "the container (e.g. in a port that has a QoS minimum "
                "bandwidth policy rule attached) but the flavor did not "
                "specify any group policy. To avoid the placement failure "
                "nova defaults the group policy to 'none'.")
            res_req.group_policy = 'none'

    return res_req
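
The resource normalisation at the top of resources_from_request_spec() can be checked in isolation: fractional CPUs are rounded up, zero CPU or memory is bumped to 1, and disk is only requested when non-zero. A standalone sketch of just that arithmetic (the default values and plain-string resource class names are placeholders, not zun's CONF defaults or os_resource_classes constants):

import math

def normalized_resources(cpu=None, memory=None, disk=None,
                         default_cpu=1, default_memory=512):
    """Map container cpu/memory/disk onto placement-style amounts."""
    cpu = cpu if cpu else default_cpu
    cpu = int(math.ceil(cpu))             # placement has no fractional VCPU
    cpu = cpu if cpu > 1 else 1           # and no zero VCPU
    memory = int(memory) if memory else default_memory
    memory = memory if memory > 1 else 1  # no zero MEMORY_MB either
    resources = {"VCPU": cpu, "MEMORY_MB": memory}
    if disk:
        resources["DISK_GB"] = disk
    return resources

# normalized_resources(cpu=0.1, memory=256) -> {'VCPU': 1, 'MEMORY_MB': 256}
# normalized_resources()                    -> {'VCPU': 1, 'MEMORY_MB': 512}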