Example #1
    def _schedule(self, elevated, topic, request_spec, *args, **kwargs):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        if topic != "compute":
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        instance_type = request_spec.get("instance_type", None)
        if not instance_type:
            msg = _("Scheduler only understands InstanceType-based" "provisioning.")
            raise NotImplementedError(msg)

        cost_functions = self.get_cost_functions()

        ram_requirement_mb = instance_type["memory_mb"]
        disk_requirement_gb = instance_type["local_gb"]

        options = self._get_configuration_options()

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
        unfiltered_hosts_dict = self.zone_manager.get_all_host_data(elevated)
        unfiltered_hosts = unfiltered_hosts_dict.items()

        num_instances = request_spec.get("num_instances", 1)
        selected_hosts = []
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            filtered_hosts = self._filter_hosts(topic, request_spec, unfiltered_hosts, options)

            if not filtered_hosts:
                # Can't get any more locally.
                break

            LOG.debug(_("Filtered %(filtered_hosts)s") % locals())

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            weighted_host = least_cost.weighted_sum(cost_functions, filtered_hosts, options)
            LOG.debug(_("Weighted %(weighted_host)s") % locals())
            selected_hosts.append(weighted_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            weighted_host.hostinfo.consume_resources(disk_requirement_gb, ram_requirement_mb)

        # Next, tack on the host weights from the child zones
        if not request_spec.get("local_zone", False):
            json_spec = json.dumps(request_spec)
            all_zones = self._zone_get_all(elevated)
            child_results = self._call_zone_method(elevated, "select", specs=json_spec, zones=all_zones)
            selected_hosts.extend(self._adjust_child_weights(child_results, all_zones))
        selected_hosts.sort(key=operator.attrgetter("weight"))
        return selected_hosts[:num_instances]
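
All of these examples hinge on least_cost.weighted_sum(), whose contract can be inferred from the assertions: it applies each (weight, cost_fn) pair to every host, sums the weighted costs per host, and wraps the cheapest host in a WeightedHost. A minimal sketch of that contract follows; WeightedHost and its field names are assumptions for illustration, and the older variants (Examples #3, #5, #9, #10) iterate (host, HostInfo) pairs and expose .host rather than .host_state.host:

    class WeightedHost(object):
        def __init__(self, weight, host_state):
            self.weight = weight          # total weighted cost; lower wins
            self.host_state = host_state  # state of the winning host

    def weighted_sum(cost_fns, host_states, options):
        """Return a WeightedHost for the lowest-cost host."""
        best = None
        for host_state in host_states:
            total = sum(weight * fn(host_state)
                        for weight, fn in cost_fns)
            if best is None or total < best.weight:
                best = WeightedHost(total, host_state)
        return best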
Example #2
    def test_weighted_sum_single_function(self):
        fn_tuples = [(1.0, offset)]
        hostinfo_list = self._get_all_hosts()

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, ]=
        # [10512, 11024, 13072, 18192]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list, options)
        self.assertEqual(weighted_host.weight, 10512)
        self.assertEqual(weighted_host.host_state.host, "host1")
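
The offset and scale cost functions are never shown in these listings; from the arithmetic in the comments they presumably look like the following (reconstructed from the expected values, not taken from the source; the .items()-based variants in Examples #9 and #10 evidently scale by 0.5 rather than 2):

    def offset(host_state):
        # e.g. 512 + 10000 = 10512
        return host_state.free_ram_mb + 10000

    def scale(host_state):
        # e.g. 512 * 2 = 1024 (Examples #7 and #8)
        return host_state.free_ram_mb * 2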
Example #3
    def test_weighted_sum_single_function(self):
        fn_tuples = [(1.0, offset), ]
        hostinfo_list = self.zone_manager.get_all_host_data(None).items()

        # host1: free_ram_mb=0
        # host2: free_ram_mb=1536
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, ]=
        # [10000, 11536, 13072, 18192]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
                                                options)
        self.assertEqual(weighted_host.weight, 10000)
        self.assertEqual(weighted_host.host, 'host1')
Example #4
    def test_weighted_sum_single_function(self):
        fn_tuples = [(1.0, offset), ]
        hostinfo_list = self._get_all_hosts()

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, ]=
        # [10512, 11024, 13072, 18192]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
                                                options)
        self.assertEqual(weighted_host.weight, 10512)
        self.assertEqual(weighted_host.host_state.host, 'host1')
Example #5
    def test_weighted_sum_single_function(self):
        fn_tuples = [
            (1.0, offset),
        ]
        hostinfo_list = self.zone_manager.get_all_host_data(None).items()

        # host1: free_ram_mb=0
        # host2: free_ram_mb=1536
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, ]=
        # [10000, 11536, 13072, 18192]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
                                                options)
        self.assertEqual(weighted_host.weight, 10000)
        self.assertEqual(weighted_host.host, 'host1')
Example #6
    def test_basic_costing(self):
        hosts = [
            FakeHost(1, 512 * MB, 100),
            FakeHost(2, 256 * MB, 400),
            FakeHost(3, 512 * MB, 100),
        ]

        weighted_fns = [
            (1, lambda h: h.free_ram),  # Fill-first, free_ram is a *cost*
            (2, lambda h: h.io),  # Avoid high I/O
        ]

        costs = least_cost.weighted_sum(
            domain=hosts, weighted_fns=weighted_fns)

        # Each 256 MB unit of free-ram contributes 0.5 points by way of:
        #   cost = weight * (score/max_score) = 1 * (256/512) = 0.5
        # Each 100 iops of IO adds 0.5 points by way of:
        #   cost = 2 * (100/400) = 2 * 0.25 = 0.5
        expected = [1.5, 2.5, 1.5]
        self.assertEqual(expected, costs)
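
Example #6 exercises a different weighted_sum() signature that returns the full per-host cost list and normalizes each function's score by the maximum across the domain. A sketch consistent with the comment arithmetic (the actual implementation may differ):

    def weighted_sum(domain, weighted_fns):
        costs = [0.0] * len(domain)
        for weight, fn in weighted_fns:
            scores = [fn(item) for item in domain]
            max_score = max(scores) if scores else 1
            for i, score in enumerate(scores):
                costs[i] += weight * (float(score) / max_score)
        return costs

For the first host this gives 1 * (512/512) + 2 * (100/400) = 1.5, matching the expected list, and it returns [] for an empty domain, which is exactly what Example #11 below asserts.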
Example #7
    def test_weighted_sum_happy_day(self):
        fn_tuples = [(1.0, offset), (1.0, scale)]
        hostinfo_list = self._get_all_hosts()

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, scale]=
        # [10512, 11024, 13072, 18192]
        # [1024,  2048, 6144, 16384]

        # adjusted [ 1.0 * x + 1.0 * y] =
        # [11536, 13072, 19216, 34576]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list, options)
        self.assertEqual(weighted_host.weight, 11536)
        self.assertEqual(weighted_host.host_state.host, "host1")
Example #8
    def test_weighted_sum_happy_day(self):
        fn_tuples = [(1.0, offset), (1.0, scale)]
        hostinfo_list = self._get_all_hosts()

        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, scale]=
        # [10512, 11024, 13072, 18192]
        # [1024,  2048, 6144, 16384]

        # adjusted [ 1.0 * x + 1.0 * y] =
        # [11536, 13072, 19216, 34576]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
                                                options)
        self.assertEqual(weighted_host.weight, 11536)
        self.assertEqual(weighted_host.host_state.host, 'host1')
Example #9
    def test_weighted_sum_happy_day(self):
        fn_tuples = [(1.0, offset), (1.0, scale)]
        hostinfo_list = self.zone_manager.get_all_host_data(None).items()

        # host1: free_ram_mb=0
        # host2: free_ram_mb=1536
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, scale]=
        # [10000, 11536, 13072, 18192]
        # [0,  768, 1536, 4096]

        # adjusted [ 1.0 * x + 1.0 * y] =
        # [10000, 12304, 14608, 22288]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
                                                options)
        self.assertEqual(weighted_host.weight, 10000)
        self.assertEqual(weighted_host.host, 'host1')
Example #10
    def test_weighted_sum_happy_day(self):
        fn_tuples = [(1.0, offset), (1.0, scale)]
        hostinfo_list = self.zone_manager.get_all_host_data(None).items()

        # host1: free_ram_mb=0
        # host2: free_ram_mb=1536
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192

        # [offset, scale]=
        # [10000, 11536, 13072, 18192]
        # [0,  768, 1536, 4096]

        # adjusted [ 1.0 * x + 1.0 * y] =
        # [10000, 12304, 14608, 22288]

        # so, host1 should win:
        options = {}
        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
                                                options)
        self.assertEqual(weighted_host.weight, 10000)
        self.assertEqual(weighted_host.host, 'host1')
Example #11
    def test_empty_domain(self):
        domain = []
        weighted_fns = []
        result = least_cost.weighted_sum(domain, weighted_fns)
        expected = []
        self.assertEqual(expected, result)
Example #12
    def _schedule(self, elevated, topic, request_spec, *args, **kwargs):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        if topic != "compute":
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        instance_type = request_spec.get("instance_type", None)
        if not instance_type:
            msg = _("Scheduler only understands InstanceType-based" \
                    "provisioning.")
            raise NotImplementedError(msg)

        cost_functions = self.get_cost_functions()

        ram_requirement_mb = instance_type['memory_mb']
        disk_requirement_gb = instance_type['local_gb']

        options = self._get_configuration_options()

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
        unfiltered_hosts_dict = self.zone_manager.get_all_host_data(elevated)
        unfiltered_hosts = unfiltered_hosts_dict.items()

        num_instances = request_spec.get('num_instances', 1)
        selected_hosts = []
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            filtered_hosts = self._filter_hosts(topic, request_spec,
                    unfiltered_hosts, options)

            if not filtered_hosts:
                # Can't get any more locally.
                break

            LOG.debug(_("Filtered %(filtered_hosts)s") % locals())

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            weighted_host = least_cost.weighted_sum(cost_functions,
                                                filtered_hosts, options)
            LOG.debug(_("Weighted %(weighted_host)s") % locals())
            selected_hosts.append(weighted_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            weighted_host.hostinfo.consume_resources(disk_requirement_gb,
                                                     ram_requirement_mb)

        # Next, tack on the host weights from the child zones
        if not request_spec.get('local_zone', False):
            json_spec = json.dumps(request_spec)
            all_zones = self._zone_get_all(elevated)
            child_results = self._call_zone_method(elevated, "select",
                    specs=json_spec, zones=all_zones)
            selected_hosts.extend(self._adjust_child_weights(
                                                    child_results, all_zones))
        selected_hosts.sort(key=operator.attrgetter('weight'))
        return selected_hosts[:num_instances]
Example #13
    def _schedule(self, context, topic, request_spec, *args, **kwargs):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()
        if topic != "compute":
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        instance_properties = request_spec['instance_properties']
        instance_type = request_spec.get("instance_type", None)

        cost_functions = self.get_cost_functions()
        config_options = self._get_configuration_options()

        filter_properties = kwargs.get('filter_properties', {})
        filter_properties.update({'context': context,
                                  'request_spec': request_spec,
                                  'config_options': config_options,
                                  'instance_type': instance_type})

        self.populate_filter_properties(request_spec,
                                        filter_properties)

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
        unfiltered_hosts_dict = self.host_manager.get_all_host_states(
                elevated, topic)

        # Note: remember, we are using an iterator here. So only
        # traverse this list once. This can bite you if the hosts
        # are being scanned in a filter or weighing function.
        hosts = unfiltered_hosts_dict.itervalues()

        num_instances = request_spec.get('num_instances', 1)
        selected_hosts = []
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            hosts = self.host_manager.filter_hosts(hosts,
                    filter_properties)
            if not hosts:
                # Can't get any more locally.
                break

            LOG.debug(_("Filtered %(hosts)s") % locals())

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            # TODO(comstud): filter_properties will also be used for
            # weighing, and I plan to fold weighing into the host manager
            # in a future patch.  I'll address the naming of this
            # variable at that time.
            weighted_host = least_cost.weighted_sum(cost_functions,
                    hosts, filter_properties)
            LOG.debug(_("Weighted %(weighted_host)s") % locals())
            selected_hosts.append(weighted_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            weighted_host.host_state.consume_from_instance(
                    instance_properties)

        selected_hosts.sort(key=operator.attrgetter('weight'))
        return selected_hosts[:num_instances]
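
The warning about itervalues() is easy to trip over: a dict iterator is exhausted after a single pass, so a filter or weighing function that scanned the hosts twice would silently see an empty sequence the second time. The loop above only works because filter_hosts() materializes a fresh list on every iteration. A toy illustration (Python 2, matching the snippets here):

    hosts = {'host1': 1, 'host2': 2}.itervalues()
    first = list(hosts)   # [1, 2] -- consumes the iterator
    second = list(hosts)  # []     -- already exhausted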
Example #14
    def _schedule(self, context, topic, request_spec, *args, **kwargs):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()
        if topic != "compute":
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        instance_properties = request_spec['instance_properties']
        instance_type = request_spec.get("instance_type", None)
        if not instance_type:
            msg = _("Scheduler only understands InstanceType-based" \
                    "provisioning.")
            raise NotImplementedError(msg)

        cost_functions = self.get_cost_functions()
        config_options = self._get_configuration_options()

        filter_properties = kwargs.get('filter_properties', {})
        filter_properties.update({
            'context': context,
            'request_spec': request_spec,
            'config_options': config_options,
            'instance_type': instance_type
        })

        self.populate_filter_properties(request_spec, filter_properties)

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
        unfiltered_hosts_dict = self.host_manager.get_all_host_states(
            elevated, topic)
        hosts = unfiltered_hosts_dict.itervalues()

        num_instances = request_spec.get('num_instances', 1)
        selected_hosts = []
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            hosts = self.host_manager.filter_hosts(hosts, filter_properties)
            if not hosts:
                # Can't get any more locally.
                break

            LOG.debug(_("Filtered %(hosts)s") % locals())

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            # TODO(comstud): filter_properties will also be used for
            # weighing, and I plan to fold weighing into the host manager
            # in a future patch.  I'll address the naming of this
            # variable at that time.
            weighted_host = least_cost.weighted_sum(cost_functions, hosts,
                                                    filter_properties)
            LOG.debug(_("Weighted %(weighted_host)s") % locals())
            selected_hosts.append(weighted_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            weighted_host.host_state.consume_from_instance(instance_properties)

        # Next, tack on the host weights from the child zones
        if not filter_properties.get('local_zone_only', False):
            json_spec = json.dumps(request_spec)
            all_zones = self._zone_get_all(elevated)
            child_results = self._call_zone_method(elevated,
                                                   "select",
                                                   specs=json_spec,
                                                   zones=all_zones)
            selected_hosts.extend(
                self._adjust_child_weights(child_results, all_zones))
        selected_hosts.sort(key=operator.attrgetter('weight'))
        return selected_hosts[:num_instances]
Example #15
    def _schedule(self,
                  context,
                  topic,
                  request_spec,
                  filter_properties,
                  instance_uuids=None):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()
        if topic != "compute":
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        instance_properties = request_spec['instance_properties']
        instance_type = request_spec.get("instance_type", None)

        cost_functions = self.get_cost_functions()
        config_options = self._get_configuration_options()

        # check retry policy.  Rather ugly use of instance_uuids[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        properties = instance_properties.copy()
        if instance_uuids:
            properties['uuid'] = instance_uuids[0]
        self._populate_retry(filter_properties, properties)

        filter_properties.update({
            'context': context,
            'request_spec': request_spec,
            'config_options': config_options,
            'instance_type': instance_type
        })

        self.populate_filter_properties(request_spec, filter_properties)

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
        unfiltered_hosts_dict = self.host_manager.get_all_host_states(
            elevated, topic)

        # Note: remember, we are using an iterator here. So only
        # traverse this list once. This can bite you if the hosts
        # are being scanned in a filter or weighing function.
        hosts = unfiltered_hosts_dict.itervalues()

        selected_hosts = []
        if instance_uuids:
            num_instances = len(instance_uuids)
        else:
            num_instances = request_spec.get('num_instances', 1)
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            hosts = self.host_manager.filter_hosts(hosts, filter_properties)
            if not hosts:
                # Can't get any more locally.
                break

            LOG.debug(_("Filtered %(hosts)s") % locals())

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            # TODO(comstud): filter_properties will also be used for
            # weighing, and I plan to fold weighing into the host manager
            # in a future patch.  I'll address the naming of this
            # variable at that time.
            weighted_host = least_cost.weighted_sum(cost_functions, hosts,
                                                    filter_properties)
            LOG.debug(_("Weighted %(weighted_host)s") % locals())
            selected_hosts.append(weighted_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            weighted_host.host_state.consume_from_instance(instance_properties)

        selected_hosts.sort(key=operator.attrgetter('weight'))
        return selected_hosts
Example #16
    def _schedule(self, context, topic, request_spec, filter_properties, instance_uuids=None):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()
        if topic != "compute":
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        instance_properties = request_spec["instance_properties"]
        instance_type = request_spec.get("instance_type", None)

        cost_functions = self.get_cost_functions()
        config_options = self._get_configuration_options()

        # check retry policy.  Rather ugly use of instance_uuids[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        properties = instance_properties.copy()
        if instance_uuids:
            properties["uuid"] = instance_uuids[0]
        self._populate_retry(filter_properties, properties)

        filter_properties.update(
            {
                "context": context,
                "request_spec": request_spec,
                "config_options": config_options,
                "instance_type": instance_type,
            }
        )

        self.populate_filter_properties(request_spec, filter_properties)

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
        unfiltered_hosts_dict = self.host_manager.get_all_host_states(elevated, topic)

        # Note: remember, we are using an iterator here. So only
        # traverse this list once. This can bite you if the hosts
        # are being scanned in a filter or weighing function.
        hosts = unfiltered_hosts_dict.itervalues()

        selected_hosts = []
        if instance_uuids:
            num_instances = len(instance_uuids)
        else:
            num_instances = request_spec.get("num_instances", 1)
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            hosts = self.host_manager.filter_hosts(hosts, filter_properties)
            if not hosts:
                # Can't get any more locally.
                break

            LOG.debug(_("Filtered %(hosts)s") % locals())

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            # TODO(comstud): filter_properties will also be used for
            # weighing, and I plan to fold weighing into the host manager
            # in a future patch.  I'll address the naming of this
            # variable at that time.
            weighted_host = least_cost.weighted_sum(cost_functions, hosts, filter_properties)
            LOG.debug(_("Weighted %(weighted_host)s") % locals())
            selected_hosts.append(weighted_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            weighted_host.host_state.consume_from_instance(instance_properties)

        selected_hosts.sort(key=operator.attrgetter("weight"))
        return selected_hosts
Example #17
    def _schedule(self, context, topic, request_spec, filter_properties,
                  instance_uuids=None):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()
        if topic != "compute":
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        instance_properties = request_spec['instance_properties']
        instance_type = request_spec.get("instance_type", None)

        # Note(gtt): Make sure "extra_specs" in instance_type is a dict.
        # db.instance_type_get_by_flavor_id converts extra_specs to a
        # dict, so refresh "instance_type" from the database even when it
        # is already present in the request_spec.
        if instance_type is not None:
            try:
                instance_type = db.instance_type_get_by_flavor_id(
                    context, instance_type.get('flavorid'))
            except exception.FlavorNotFound:
                LOG.warn("flavorid not in instance_type. instance_type: %s" %
                         instance_type)
            else:
                request_spec.update({"instance_type": instance_type})

        cost_functions = self.get_cost_functions()
        config_options = self._get_configuration_options()

        # check retry policy.  Rather ugly use of instance_uuids[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        properties = instance_properties.copy()
        if instance_uuids:
            properties['uuid'] = instance_uuids[0]
        self._populate_retry(filter_properties, properties)

        filter_properties.update({'context': context,
                                  'request_spec': request_spec,
                                  'config_options': config_options,
                                  'instance_type': instance_type})

        self.populate_filter_properties(request_spec,
                                        filter_properties)

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
        unfiltered_hosts_dict = self.host_manager.get_all_host_states(
                elevated, topic)

        # Note: remember, we are using an iterator here. So only
        # traverse this list once. This can bite you if the hosts
        # are being scanned in a filter or weighing function.
        hosts = unfiltered_hosts_dict.itervalues()

        selected_hosts = []
        if instance_uuids:
            num_instances = len(instance_uuids)
        else:
            num_instances = request_spec.get('num_instances', 1)
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            if FLAGS.use_network_qos:
                if instance_uuids:
                    inst_uuid = instance_uuids[num]
                else:
                    inst_uuid = None
                qos_info = resource_tracker.get_instance_network_qos(
                                            instance_type, inst_uuid)
                instance_properties['private_network_bandwith'] = \
                        qos_info['private_qos']
                instance_properties['public_network_bandwith'] = \
                        qos_info['public_qos']
                filter_properties.update(
                        {'private_network_bandwith': qos_info['private_qos'],
                         'public_network_bandwith': qos_info['public_qos']})

            hosts = self.host_manager.filter_hosts(hosts,
                    filter_properties)
            if not hosts:
                # Can't get any more locally.
                break

            LOG.debug(_("Filtered %(hosts)s") % locals())

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            # TODO(comstud): filter_properties will also be used for
            # weighing, and I plan to fold weighing into the host manager
            # in a future patch.  I'll address the naming of this
            # variable at that time.
            weighted_host = least_cost.weighted_sum(cost_functions,
                    hosts, filter_properties)
            LOG.debug(_("Weighted %(weighted_host)s") % locals())
            selected_hosts.append(weighted_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            weighted_host.host_state.consume_from_instance(
                    instance_properties, instance_type)

        selected_hosts.sort(key=operator.attrgetter('weight'))
        return selected_hosts
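
Across the newer variants, consume_from_instance() is what lets a single scheduling pass place several instances sensibly: it deducts the instance's footprint from the in-memory host state, so the next filter/weigh round sees reduced capacity without touching the database. A minimal sketch of the idea (field and key names are assumptions, not the real HostState API):

    class HostState(object):
        def __init__(self, host, free_ram_mb, free_disk_gb):
            self.host = host
            self.free_ram_mb = free_ram_mb
            self.free_disk_gb = free_disk_gb

        def consume_from_instance(self, instance_properties,
                                  instance_type=None):
            # Virtual consumption only: adjust local bookkeeping so the
            # next filtering/weighing pass reflects this placement.
            self.free_ram_mb -= instance_properties.get('memory_mb', 0)
            self.free_disk_gb -= instance_properties.get('root_gb', 0)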