def _partition_by_atoms(
            self, vertices, n_atoms, max_atoms_per_core, subgraph, graph,
            graph_to_subgraph_mapper, resource_tracker):
        """ Try to partition subvertices on how many atoms it can fit on\
            each subvert

        :param vertices: the vertexes that need to be partitioned at the same \
                    time
        :type vertices: iterable list of\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :param n_atoms: the atoms of the first vertex
        :type n_atoms: int
        :param max_atoms_per_core: the max atoms from all the vertexes\
                    considered that have max_atom constraints
        :type max_atoms_per_core: int
        :param subgraph: the partitioned_graph of the problem space to put\
                    subverts in
        :type subgraph: :py:class:`pacman.model.subgraph.subgraph.Subgraph`
        :param graph: the partitionable_graph object
        :type graph:\
                    :py:class:`pacman.model.partitionable_graph.partitionable_graph.PartitionableGraph`
        :param graph_to_subgraph_mapper: the mapper from\
                    partitionable_graph to partitioned_graph
        :type graph_to_subgraph_mapper:\
                    py:class:'pacman.modelgraph_subgraph_mapper.graph_mapper.GraphMapper'
        :param resource_tracker: A tracker of assigned resources
        :type resource_tracker:\
                    :py:class:`pacman.utilities.resource_tracker.ResourceTracker`
        :type no_machine_time_steps: int
        """
        n_atoms_placed = 0
        while n_atoms_placed < n_atoms:

            lo_atom = n_atoms_placed
            hi_atom = lo_atom + max_atoms_per_core - 1
            if hi_atom >= n_atoms:
                hi_atom = n_atoms - 1

            # Scale down the number of atoms to fit the available resources
            used_placements, hi_atom = self._scale_down_resources(
                lo_atom, hi_atom, vertices, resource_tracker,
                max_atoms_per_core, graph)

            # Update where we are
            n_atoms_placed = hi_atom + 1

            # Create the subvertices
            for (vertex, used_resources) in used_placements:
                vertex_slice = Slice(lo_atom, hi_atom)
                subvertex = vertex.create_subvertex(
                    vertex_slice, used_resources,
                    "{}:{}:{}".format(vertex.label, lo_atom, hi_atom),
                    partition_algorithm_utilities.get_remaining_constraints(
                        vertex))

                # update objects
                subgraph.add_subvertex(subvertex)
                graph_to_subgraph_mapper.add_subvertex(
                    subvertex, vertex_slice, vertex)
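
The loop above walks the atom range in chunks of at most max_atoms_per_core atoms, letting _scale_down_resources shrink each chunk until it fits the machine. A minimal standalone sketch of just the chunking arithmetic, using a hypothetical chunk_atoms helper that is not part of PACMAN:

# Standalone sketch of the chunking loop above: split n_atoms into
# (lo_atom, hi_atom) ranges of at most max_atoms_per_core atoms.
# Hypothetical helper, not part of the PACMAN API.
def chunk_atoms(n_atoms, max_atoms_per_core):
    chunks = []
    n_atoms_placed = 0
    while n_atoms_placed < n_atoms:
        lo_atom = n_atoms_placed
        hi_atom = min(lo_atom + max_atoms_per_core - 1, n_atoms - 1)
        chunks.append((lo_atom, hi_atom))
        n_atoms_placed = hi_atom + 1
    return chunks

print(chunk_atoms(10, 4))   # [(0, 3), (4, 7), (8, 9)]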

    @staticmethod
    def _reallocate_resources(
            used_placements, resource_tracker, lo_atom, hi_atom, graph):
        """ readjusts resource allocation and updates the placement list to\
            take into account the new layout of the atoms

        :param used_placements: the original list of tuples containing\
                    placement data
        :type used_placements: iterable of tuples
        :param resource_tracker: the tracker of resources
        :type resource_tracker:\
                    :py:class:`pacman.utilities.resource_tracker.ResourceTracker`
        :param lo_atom: the low atom of a slice to be considered
        :type lo_atom: int
        :param hi_atom: the high atom of a slice to be considered
        :type hi_atom: int
        :param graph: the partitionable graph used by the partitioner
        :type graph:\
                    :py:class:`pacman.model.partitionable_graph.partitionable_graph.PartitionableGraph`
        :return: the new list of tuples containing placement data
        :rtype: iterable of tuples
        """

        new_used_placements = list()
        for (placed_vertex, x, y, p, placed_resources,
                ip_tags, reverse_ip_tags) in used_placements:

            # Deallocate the existing resources
            resource_tracker.unallocate_resources(
                x, y, p, placed_resources, ip_tags, reverse_ip_tags)

            # Get the new resource usage
            vertex_slice = Slice(lo_atom, hi_atom)
            new_resources = placed_vertex.get_resources_used_by_atoms(
                vertex_slice, graph)

            # Re-allocate the existing resources
            (x, y, p, ip_tags, reverse_ip_tags) = \
                resource_tracker.allocate_constrained_resources(
                    new_resources, placed_vertex.constraints)
            new_used_placements.append(
                (placed_vertex, x, y, p, new_resources, ip_tags,
                 reverse_ip_tags))
        return new_used_placements
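
The helper above gives back each placement's old reservation, recosts the vertex for the new, smaller slice, and reserves space again. A compressed sketch of that round trip, using hypothetical callables in place of the real ResourceTracker and vertex methods:

# Hypothetical stand-ins for the tracker and vertex calls used above; the
# real ResourceTracker methods take more arguments (coordinates, tags, etc.).
def reallocate_all(placements, unallocate, cost_of, allocate, lo_atom, hi_atom):
    new_placements = []
    for vertex, handle, old_cost in placements:
        unallocate(handle, old_cost)                   # release the old reservation
        new_cost = cost_of(vertex, lo_atom, hi_atom)   # cost of the smaller slice
        new_handle = allocate(vertex, new_cost)        # reserve space again
        new_placements.append((vertex, new_handle, new_cost))
    return new_placements

# Example use with trivial lambdas standing in for the tracker:
placements = [("v0", "core-A", 40), ("v1", "core-B", 40)]
print(reallocate_all(
    placements,
    unallocate=lambda handle, cost: None,
    cost_of=lambda vertex, lo, hi: (hi - lo + 1) * 2,
    allocate=lambda vertex, cost: "core-" + vertex,
    lo_atom=0, hi_atom=9))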
Example #3
    def test_immutability_as_slice(self):
        s = Slice(0, 10)
        with self.assertRaises(AttributeError):
            s.as_slice = slice(2, 10)
Example #4
    def test_immutability_n_atoms(self):
        s = Slice(0, 10)
        with self.assertRaises(AttributeError):
            s.n_atoms = 3
Example #5
    def test_immutability_hi_atom(self):
        s = Slice(0, 10)
        with self.assertRaises(AttributeError):
            s.hi_atom = 3
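
The three tests above check that a Slice is read-only: assigning to any of its attributes raises AttributeError. A minimal sketch of such an immutable slice type built on a namedtuple, assuming behaviour like the tests above exercise (the real PACMAN Slice class may differ in detail):

from collections import namedtuple

# Minimal sketch of an immutable slice; namedtuple fields cannot be rebound,
# which is what the assertRaises(AttributeError) tests above rely on.
class SliceSketch(namedtuple("SliceSketch", "lo_atom hi_atom")):
    __slots__ = ()

    @property
    def n_atoms(self):
        # number of atoms covered, inclusive of both ends
        return self.hi_atom - self.lo_atom + 1

    @property
    def as_slice(self):
        # equivalent built-in slice object (end-exclusive)
        return slice(self.lo_atom, self.hi_atom + 1)

s = SliceSketch(0, 10)
print(s.n_atoms)        # 11
# s.n_atoms = 3         # raises AttributeError, as the tests above expect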
    def _scale_up_resource_usage(
            self, used_resources, hi_atom, lo_atom, max_atoms_per_core, vertex,
            resources, ratio, graph):
        """ Try to push up the number of atoms in a subvertex to be as close\
            to the available resources as possible

        :param used_resources: the resources used by the machine so far
        :type used_resources:\
                    :py:class:`pacman.model.resources.resource.Resource`
        :param hi_atom: the total number of atoms to place for this vertex
        :type hi_atom: int
        :param lo_atom: the number of atoms already partitioned
        :type lo_atom: int
        :param max_atoms_per_core: the min max atoms from all the vertexes \
                    considered that have max_atom constraints
        :type max_atoms_per_core: int
        :param vertex: the vertexes to scale up the num atoms per core for
        :type vertex:\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :param resources: the resource estimate for the vertex for a given\
                    number of atoms
        :type resources:\
                    :py:class:`pacman.model.resources.resource.Resource`
        :param ratio: the ratio between max atoms and available resources
        :type ratio: int
        :return: the new resources used and the new hi_atom
        :rtype: tuple of\
                    (:py:class:`pacman.model.resources.resource.Resource`,\
                    int)
        """

        previous_used_resources = used_resources
        previous_hi_atom = hi_atom

        # Keep searching while the ratio is still in range,
        # the next hi_atom value is still less than the number of atoms,
        # and the number of atoms is less than the constrained number of atoms
        while ((ratio < 1.0) and ((hi_atom + 1) < vertex.n_atoms) and
                ((hi_atom - lo_atom + 2) < max_atoms_per_core)):

            # Update the hi_atom, keeping track of the last hi_atom which
            # resulted in a ratio < 1.0
            previous_hi_atom = hi_atom
            hi_atom += 1

            # Find the new resource usage, keeping track of the last usage
            # which resulted in a ratio < 1.0
            previous_used_resources = used_resources
            vertex_slice = Slice(lo_atom, hi_atom)
            used_resources = vertex.get_resources_used_by_atoms(
                vertex_slice, graph)
            ratio = self._find_max_ratio(used_resources, resources)

        # If we have managed to fit everything exactly (unlikely but possible),
        # return the matched resources and high atom count
        if ratio == 1.0:
            return used_resources, hi_atom

        # At this point, the ratio > 1.0, so pick the last allocation of
        # resources, which will be < 1.0
        return previous_used_resources, previous_hi_atom
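
A toy, standalone version of the search above: grow the slice while the worst used-to-available ratio stays below 1.0, then fall back to the last size that fitted. The linear cost model is made up (cost_per_atom and available are invented numbers, not real SDRAM figures):

# Toy illustration of the scale-up search: grow hi_atom while the slice still
# fits, then keep the last fitting value when the ratio overshoots 1.0.
def scale_up(lo_atom, hi_atom, n_atoms, max_atoms_per_core,
             cost_per_atom, available):
    ratio = ((hi_atom - lo_atom + 1) * cost_per_atom) / available
    prev_hi = hi_atom
    while (ratio < 1.0 and hi_atom + 1 < n_atoms
            and hi_atom - lo_atom + 2 < max_atoms_per_core):
        prev_hi = hi_atom
        hi_atom += 1
        ratio = ((hi_atom - lo_atom + 1) * cost_per_atom) / available
    return hi_atom if ratio <= 1.0 else prev_hi

print(scale_up(0, 3, 100, 64, cost_per_atom=10, available=75))  # -> 6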
    def _scale_down_resources(
            self, lo_atom, hi_atom, vertices, resource_tracker,
            max_atoms_per_core, graph):
        """ Reduce the number of atoms on a core so that it fits within the
            resources available.

        :param lo_atom: the number of atoms already partitioned
        :type lo_atom: int
        :param hi_atom: the total number of atoms to place for this vertex
        :type hi_atom: int
        :param vertices: the vertexes that need to be partitioned at the same \
                    time
        :type vertices: iterable of\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :param max_atoms_per_core: the min max atoms from all the vertexes \
                    considered that have max_atom constraints
        :type max_atoms_per_core: int
        :param graph: the partitionable_graph object
        :type graph:\
                    :py:class:`pacman.model.graph.partitionable_graph.PartitionableGraph`
        :param resource_tracker: Tracker of used resources
        :type resource_tracker: spinnmachine.machine.Machine object
        :return: the list of placements made by this method and the new amount\
                    of atoms partitioned
        :rtype: tuple of (iterable of tuples, int)
        :raise PacmanPartitionException: when the vertex cannot be partitioned
        """
        used_placements = list()

        # Find the number of atoms that will fit in each vertex given the
        # resources available
        min_hi_atom = hi_atom
        for vertex in vertices:

            # get max resources available on machine
            resources = \
                resource_tracker.get_maximum_constrained_resources_available(
                    vertex.constraints)

            # get resources used by vertex
            vertex_slice = Slice(lo_atom, hi_atom)
            used_resources = vertex.get_resources_used_by_atoms(
                vertex_slice, graph)

            # Work out the ratio of used to available resources
            ratio = self._find_max_ratio(used_resources, resources)

            while ratio > 1.0 and hi_atom >= lo_atom:

                # Scale the resources by the ratio
                old_n_atoms = (hi_atom - lo_atom) + 1
                new_n_atoms = int(float(old_n_atoms) / (ratio * 1.1))

                # Avoid infinite looping
                if old_n_atoms == new_n_atoms:
                    new_n_atoms -= 1

                # Find the new resource usage
                hi_atom = lo_atom + new_n_atoms - 1
                if hi_atom >= lo_atom:
                    vertex_slice = Slice(lo_atom, hi_atom)
                    used_resources = vertex.get_resources_used_by_atoms(
                        vertex_slice, graph)
                    ratio = self._find_max_ratio(used_resources, resources)

            # If we couldn't partition, raise an exception
            if hi_atom < lo_atom:
                raise exceptions.PacmanPartitionException(
                    "No more of vertex {} would fit on the board:\n"
                    "    Allocated so far: {} atoms\n"
                    "    Request for SDRAM: {}\n"
                    "    Largest SDRAM space: {}".format(
                        vertex, lo_atom - 1,
                        used_resources.sdram.get_value(),
                        resources.sdram.get_value()))

            # Try to scale up until just below the resource usage
            used_resources, hi_atom = self._scale_up_resource_usage(
                used_resources, hi_atom, lo_atom, max_atoms_per_core, vertex,
                resources, ratio, graph)

            # If this hi_atom is smaller than the current minimum, update the
            # other placements to use (hopefully) less resources
            if hi_atom < min_hi_atom:
                min_hi_atom = hi_atom
                used_placements = self._reallocate_resources(
                    used_placements, resource_tracker, lo_atom, hi_atom, graph)

            # Attempt to allocate the resources for this vertex on the machine
            try:
                (x, y, p, ip_tags, reverse_ip_tags) = \
                    resource_tracker.allocate_constrained_resources(
                        used_resources, vertex.constraints)
                used_placements.append(
                    (vertex, x, y, p, used_resources,
                     ip_tags, reverse_ip_tags))
            except exceptions.PacmanValueError as e:
                raise exceptions.PacmanValueError(
                    "Unable to allocate requested resources to"
                    " vertex {}:\n{}".format(vertex, e))

        # reduce data to what the parent requires
        final_placements = list()
        for (vertex, _, _, _, used_resources, _, _) in used_placements:
            final_placements.append((vertex, used_resources))

        return final_placements, min_hi_atom
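
The shrink step inside the loop above divides the current chunk size by the worst over-subscription ratio, padded by 10% (the ratio * 1.1) so the next attempt lands safely under the limit, and the old_n_atoms == new_n_atoms guard forces progress when rounding would otherwise stall. A worked example with made-up numbers:

# Worked example of the shrink step: a 100-atom slice that needs 1.5x the
# available SDRAM is cut down by ratio * 1.1 before being re-costed.
old_n_atoms = 100
ratio = 1.5                                    # used / available
new_n_atoms = int(float(old_n_atoms) / (ratio * 1.1))
print(new_n_atoms)                             # 60 atoms are tried next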