def _check_masks_are_correct(partition):
    """ Check that the masks between a fixed mask constraint and a fixed_field\
        constraint. Raises error if not.

    :param AbstractSingleSourcePartition partition:
        the outgoing_edge_partition to search for these constraints
    :raise PacmanInvalidParameterException: if the masks are incompatible
    """
    fixed_mask = locate_constraints_of_type(
        partition.constraints, FixedMaskConstraint)[0]
    fixed_field = locate_constraints_of_type(
        partition.constraints, FixedKeyFieldConstraint)[0]
    mask = fixed_mask.mask
    for field in fixed_field.fields:
        if field.mask & mask != field.mask:
            raise PacmanInvalidParameterException(
                "field.mask, mask",
                "The field mask {} is outside of the mask {}".format(
                    field.mask, mask),
                "{}:{}".format(field.mask, mask))
        for other_field in fixed_field.fields:
            if other_field != field and other_field.mask & field.mask != 0:
                raise PacmanInvalidParameterException(
                    "field.mask, mask",
                    "Field masks {} and {} overlap".format(
                        field.mask, other_field.mask),
                    "{}:{}".format(field.mask, mask))
def determine_max_atoms_for_vertex(vertex):
    """  returns the max atom constraint after assessing them all.

    :param ApplicationVertex vertex: the vertex to find max atoms of
    :return: the max number of atoms per core
    :rtype: int
    """
    possible_max_atoms = list()
    n_atoms = None
    max_atom_constraints = utils.locate_constraints_of_type(
        vertex.constraints, MaxVertexAtomsConstraint)
    for constraint in max_atom_constraints:
        possible_max_atoms.append(constraint.size)
    n_atom_constraints = utils.locate_constraints_of_type(
        vertex.constraints, FixedVertexAtomsConstraint)
    for constraint in n_atom_constraints:
        if n_atoms is not None and constraint.size != n_atoms:
            raise PacmanPartitionException(
                CONTRADICTORY_FIXED_ATOM_ERROR.format(n_atoms,
                                                      constraint.size))
        n_atoms = constraint.size
    if n_atoms is not None:
        if possible_max_atoms and int(min(possible_max_atoms)) < n_atoms:
            raise PacmanPartitionException(
                "Max size of {} is incompatible with fixed size of {}".format(
                    int(min(possible_max_atoms)), n_atoms))
        return n_atoms
    if len(possible_max_atoms) != 0:
        return int(min(possible_max_atoms))
    return vertex.n_atoms
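
A minimal sketch of the selection rule, using a stand-in constraint class (the real classes live in the PACMAN constraint modules): the tightest max-atoms constraint wins, with the vertex's own atom count as the fallback.

# Illustration only; FakeMax is a hypothetical stand-in with a .size field.
class FakeMax(object):
    def __init__(self, size):
        self.size = size

possible_max_atoms = [c.size for c in (FakeMax(100), FakeMax(64))]
max_atoms = int(min(possible_max_atoms)) if possible_max_atoms else 256
assert max_atoms == 64
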
Example No. 5
def _merge_constraints(subvertex_list):
    merged_placement = None
    ip_tag = list()
    reverse_ip_tags = list()
    for subvertex in subvertex_list:
        place_constraints = utility_calls.locate_constraints_of_type(
            subvertex.constraints, PlacerChipAndCoreConstraint)
        ip_tag_constraints = utility_calls.locate_constraints_of_type(
            subvertex.constraints, TagAllocatorRequireIptagConstraint)
        ip_tag.extend(ip_tag_constraints)
        reverse_ip_tag = utility_calls.locate_constraints_of_type(
            subvertex.constraints,
            TagAllocatorRequireReverseIptagConstraint)
        reverse_ip_tags.extend(reverse_ip_tag)
        for place_constraint in place_constraints:
            if merged_placement is None:
                merged_placement = place_constraint
            else:
                x_matches = merged_placement.x == place_constraint.x
                y_matches = merged_placement.y == place_constraint.y
                p_differs = merged_placement.p != place_constraint.p
                if not (x_matches and y_matches and p_differs):
                    raise exceptions.PacmanConfigurationException(
                        "can't handle conflicting constraints")
    return merged_placement, ip_tag, reverse_ip_tags
Example No. 6
    def __set_max_atoms_to_splitters(app_graph):
        """ get the constraints sorted out.

        :param ApplicationGraph app_graph: the app graph
        """
        for vertex in app_graph.vertices:
            for constraint in utils.locate_constraints_of_type(
                    vertex.constraints, MaxVertexAtomsConstraint):
                vertex.splitter.set_max_atoms_per_core(constraint.size, False)
            for constraint in utils.locate_constraints_of_type(
                    vertex.constraints, FixedVertexAtomsConstraint):
                vertex.splitter.set_max_atoms_per_core(constraint.size, True)
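
The second argument to set_max_atoms_per_core distinguishes a ceiling (from MaxVertexAtomsConstraint) from an exact requirement (from FixedVertexAtomsConstraint). A toy splitter, assuming only that two-argument interface:

# Hypothetical splitter stub; the real splitter API may differ.
class FakeSplitter(object):
    def __init__(self):
        self.max_atoms = None
        self.is_fixed = False

    def set_max_atoms_per_core(self, size, is_fixed):
        self.max_atoms = size
        self.is_fixed = is_fixed

splitter = FakeSplitter()
splitter.set_max_atoms_per_core(64, False)  # ceiling from a max constraint
splitter.set_max_atoms_per_core(32, True)   # exact size from a fixed one
assert (splitter.max_atoms, splitter.is_fixed) == (32, True)
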
Example No. 7
    def check_incoming_verts(one_to_one_verts, vertex, partitioned_graph,
                             found_list):
        """ Adds subverts which have a one to one connection
        :param one_to_one_verts: the list of sorted vertices
        :param vertex: the destination vertex
        :param partitioned_graph: the partitioned graph
        :param found_list: the list of found vertices so far
        :return:
        """

        # locate incoming edges for this vertex
        incoming_edges = \
            partitioned_graph.incoming_subedges_from_subvertex(vertex)

        # locate constraints of chip and core for this vertex
        chip_constraint = utility_calls.locate_constraints_of_type(
            vertex.constraints, PlacerChipAndCoreConstraint)

        for incoming_edge in incoming_edges:
            incoming_vert = incoming_edge.pre_subvertex
            outgoing_edges = partitioned_graph.\
                outgoing_subedges_from_subvertex(incoming_vert)

            # if the source has only one outgoing edge, consider putting
            # it in the same chip pile
            if (len(outgoing_edges) == 1
                    and incoming_vert not in found_list):

                # if the destination vertex has no constraint, add directly
                if len(chip_constraint) == 0:
                    one_to_one_verts.append(incoming_vert)
                    found_list.append(incoming_vert)
                else:  # a constraint exists, so try to satisfy it
                    chip_constraint_incoming = \
                        utility_calls.locate_constraints_of_type(
                            incoming_vert.constraints,
                            PlacerChipAndCoreConstraint)
                    if len(chip_constraint_incoming) == 0:
                        one_to_one_verts.append(incoming_vert)
                        found_list.append(incoming_vert)
                    else:
                        x_level = chip_constraint[0].x == \
                            chip_constraint_incoming[0].x
                        y_level = chip_constraint[0].y == \
                            chip_constraint_incoming[0].y
                        p_level = chip_constraint[0].p != \
                            chip_constraint_incoming[0].p
                        if x_level and y_level and p_level:
                            one_to_one_verts.append(incoming_vert)
                            found_list.append(incoming_vert)
def get_fixed_mask(same_key_group):
    """ Get a fixed mask from a group of edges if a\
        :py:class:`FixedMaskConstraint`\
        constraint exists in any of the edges in the group.

    :param iterable(MachineEdge) same_key_group:
        Set of edges that are to be assigned the same keys and masks
    :return: The fixed mask if found, or None
    :rtype: tuple(int or None, iterable(Field) or None)
    :raise PacmanValueError: If two edges conflict in their requirements
    """
    mask = None
    fields = None
    edge_with_mask = None
    for edge in same_key_group:
        for constraint in locate_constraints_of_type(
                edge.constraints, FixedMaskConstraint):
            if mask is not None and mask != constraint.mask:
                raise PacmanValueError(
                    "Two Edges {} and {} must have the same key and mask, "
                    "but have different fixed masks, {} and {}".format(
                        edge, edge_with_mask, mask, constraint.mask))
            if (fields is not None and constraint.fields is not None and
                    fields != constraint.fields):
                raise PacmanValueError(
                    "Two Edges {} and {} must have the same key and mask, "
                    "but have different field ranges".format(
                        edge, edge_with_mask))
            mask = constraint.mask
            edge_with_mask = edge
            if constraint.fields is not None:
                fields = constraint.fields

    return mask, fields
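
The conflict rule in miniature: the loop keeps the last fixed mask seen and raises as soon as a different one appears within the same key group. A standalone sketch using plain ints instead of FixedMaskConstraint objects:

# Illustration only, not library code.
seen_mask = None
try:
    for mask in (0xFFFF0000, 0xFF000000):
        if seen_mask is not None and seen_mask != mask:
            raise ValueError("conflicting fixed masks in one key group")
        seen_mask = mask
except ValueError as err:
    print(err)  # conflicting fixed masks in one key group
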
Example No. 10
    def _locate_connected_chip_data(vertex, machine):
        """ Finds the connected virtual chip

        :param vertex: the virtual vertex to find the connected chip of
        :param machine: the machine to search
        :return: the destination chip coordinates and the default-routing flag
        """
        # locate the chip from the placement constraint
        placement_constraint = utility_calls.locate_constraints_of_type(
            vertex.constraints, PlacerChipAndCoreConstraint)[0]
        router = machine.get_chip_at(
            placement_constraint.x, placement_constraint.y).router
        found_link = False
        link_id = 0
        while not found_link and link_id < 6:
            if router.is_link(link_id):
                found_link = True
            else:
                link_id += 1
        if not found_link:
            raise exceptions.PacmanConfigurationException(
                "Can't find the real chip this virtual chip is connected to. "
                "Please fix and try again.")
        return ("[{}, {}]".format(router.get_link(link_id).destination_x,
                                  router.get_link(link_id).destination_y),
                router.get_link(link_id).multicast_default_from)
    def _locate_vertices_to_partition_now(vertex):
        """ Locate any other vertices that need to be partitioned with the\
            exact same ranges of atoms

        :param vertex: the vertex that is currently being partitioned
        :type vertex:\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :return: iterable of vertices that need to be partitioned with the\
                    exact same range of atoms
        :rtype: iterable of\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :raise PacmanPartitionException: if the vertices that need to be \
                    partitioned the same have different numbers of atoms
        """
        partition_together_vertices = list()
        partition_together_vertices.append(vertex)
        same_size_vertex_constraints = \
            utility_calls.locate_constraints_of_type(
                vertex.constraints, PartitionerSameSizeAsVertexConstraint)
        for constraint in same_size_vertex_constraints:
            if constraint.vertex.n_atoms != vertex.n_atoms:
                raise exceptions.PacmanPartitionException(
                    "A vertex and its partition-dependent vertices must "
                    "have the same number of atoms")
            else:
                partition_together_vertices.append(constraint.vertex)
        return partition_together_vertices
Example No. 13
    def _place_vertex(self, vertex, resource_tracker, machine, placements):

        # Check for the radial placement constraint
        radial_constraints = utility_calls.locate_constraints_of_type(
            [vertex], PlacerRadialPlacementFromChipConstraint)
        start_x = None
        start_y = None
        for constraint in radial_constraints:
            if start_x is None:
                start_x = constraint.x
            elif start_x != constraint.x:
                raise PacmanPlaceException("Non-matching constraints")
            if start_y is None:
                start_y = constraint.y
            elif start_y != constraint.y:
                raise PacmanPlaceException("Non-matching constraints")
        chips = None
        if start_x is not None and start_y is not None:
            chips = self._generate_radial_chips(machine, resource_tracker,
                                                start_x, start_y)

        # Create and store a new placement
        (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
            vertex.resources_required, vertex.constraints, chips)
        placement = Placement(vertex, x, y, p)
        placements.add_placement(placement)
def get_fixed_key_and_mask(same_key_group):
    """ Gets a fixed key and mask from a group of partitioned edges if a\
        :py:class:`pacman.model.constraints.key_allocator_same_key_constraint.KeyAllocatorFixedKeyAndMaskConstraint`\
        constraint exists in any of the edges in the group.

    :param same_key_group: Set of partitioned edges that are to be\
                assigned the same keys and masks
    :type same_key_group: iterable of\
                :py:class:`pacman.model.partitioned_graph.partitioned_edge.PartitionedEdge`
    :return: the fixed keys and masks if found, or None
    :raise PacmanValueError: If two edges conflict in their requirements
    """
    keys_and_masks = None
    edge_with_key_and_mask = None
    for edge in same_key_group:
        constraints = utility_calls.locate_constraints_of_type(
            edge.constraints, KeyAllocatorFixedKeyAndMaskConstraint)
        for constraint in constraints:

            # Check for conflicts
            if (keys_and_masks is not None and
                    keys_and_masks != constraint.keys_and_masks):
                raise PacmanValueError(
                    "Two Partitioned Edges {} and {} must have the same"
                    " key and mask, but have different fixed key and"
                    " masks, {} and {}".format(
                        edge, edge_with_key_and_mask, keys_and_masks,
                        constraint.keys_and_masks))
            keys_and_masks = constraint.keys_and_masks
            edge_with_key_and_mask = edge

    return keys_and_masks
Example No. 15
    def _place_vertex(self, vertex, resource_tracker, machine, placements,
                      vertices_on_same_chip):
        vertices = vertices_on_same_chip[vertex]

        # Check for the radial placement constraint
        radial_constraints = locate_constraints_of_type(
            vertices, RadialPlacementFromChipConstraint)
        start_x, start_y = self._get_start(radial_constraints)
        chips = None
        if start_x is not None and start_y is not None:
            chips = self._generate_radial_chips(machine, resource_tracker,
                                                start_x, start_y)

        if len(vertices) > 1:
            assigned_values = \
                resource_tracker.allocate_constrained_group_resources([
                    (vert.resources_required, vert.constraints)
                    for vert in vertices
                ], chips)
            for (x, y, p, _, _), vert in zip(assigned_values, vertices):
                placement = Placement(vert, x, y, p)
                placements.add_placement(placement)
        else:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, chips)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)

        return vertices
def check_types_of_edge_constraint(machine_graph):
    """ Go through the graph for operations and checks that the constraints\
        are compatible.

    :param MachineGraph machine_graph: the graph to search through
    :raises PacmanConfigurationException: if a problem is found
    """
    for partition in machine_graph.outgoing_edge_partitions:
        if partition.traffic_type != EdgeTrafficType.MULTICAST:
            continue
        fixed_key = locate_constraints_of_type(
            partition.constraints, FixedKeyAndMaskConstraint)
        fixed_mask = locate_constraints_of_type(
            partition.constraints, FixedMaskConstraint)
        fixed_field = locate_constraints_of_type(
            partition.constraints, FixedKeyFieldConstraint)

        if len(fixed_key) > 1 or len(fixed_field) > 1 or len(fixed_mask) > 1:
            raise PacmanConfigurationException(
                "There are multiple constraint of the same type on partition "
                "{} starting at {}. Please fix and try again.".format(
                    partition.identifier, partition.pre_vertex))

        fixed_key = len(fixed_key) == 1
        fixed_mask = len(fixed_mask) == 1
        fixed_field = len(fixed_field) == 1

        # check for fixed key and a fixed mask. as these should have been
        # merged before now
        if fixed_key and fixed_mask:
            raise PacmanConfigurationException(
                "The partition {} starting at {} has both a fixed key and a "
                "fixed mask constraint. These should have been merged "
                "before now, so this is treated as an error here".format(
                    partition.identifier, partition.pre_vertex))

        # check for a fixed key and fixed field, as these are incompatible
        if fixed_key and fixed_field:
            raise PacmanConfigurationException(
                "The partition {} starting at {} has both a fixed key and a "
                "fixed field constraint. These may be mergeable, but this "
                "is treated as an error here".format(
                    partition.identifier, partition.pre_vertex))

        # check that a fixed mask and fixed field have compatible masks
        if fixed_mask and fixed_field:
            _check_masks_are_correct(partition)
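
Restated as a predicate (a sketch, not library code), the combinations the checks above allow: a fixed key may not coexist with a fixed mask or a fixed field, while a fixed mask plus a fixed field is allowed pending the mask-compatibility check.

# Hypothetical summary of check_types_of_edge_constraint's rules.
def combination_is_valid(fixed_key, fixed_mask, fixed_field):
    if fixed_key and (fixed_mask or fixed_field):
        return False  # should have been merged or resolved earlier
    return True  # mask + field is allowed; masks are checked separately

assert combination_is_valid(True, False, False)
assert not combination_is_valid(True, True, False)
assert combination_is_valid(False, True, True)
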
Example No. 18
    def _compute_atoms_per_core(self, vertex, res_tracker, plan_n_timesteps):
        """ Work out how many atoms per core are required for the given\
            vertex. Assumes that the first atom of the vertex is fully\
            representative.

        :rtype: float
        """
        # Get the usage of the first atom, then assume that this will be the
        # usage of all the atoms.
        requirements = vertex.get_resources_used_by_atoms(Slice(0, 1))

        # Locate the maximum resources available
        limits = res_tracker.get_maximum_constrained_resources_available(
            requirements, vertex.constraints)

        # Find the ratio of each of the resources - if 0 is required,
        # assume the ratio is the max available
        atoms_per_sdram = self._get_ratio(
            limits.sdram.get_total_sdram(plan_n_timesteps),
            requirements.sdram.get_total_sdram(plan_n_timesteps))
        atoms_per_dtcm = self._get_ratio(
            limits.dtcm.get_value(), requirements.dtcm.get_value())
        atoms_per_cpu = self._get_ratio(
            limits.cpu_cycles.get_value(), requirements.cpu_cycles.get_value())

        n_atoms = None
        for fa_constraint in utility_calls.locate_constraints_of_type(
                vertex.constraints, FixedVertexAtomsConstraint):
            if n_atoms is not None and n_atoms != fa_constraint.size:
                raise PacmanPartitionException(
                    "Vertex has multiple contradictory fixed atom constraints"
                    " - cannot be both {} and {}".format(
                        n_atoms, fa_constraint.size))
            n_atoms = fa_constraint.size

        max_atom_values = [atoms_per_sdram, atoms_per_dtcm, atoms_per_cpu]
        for max_atom_constraint in utility_calls.locate_constraints_of_type(
                vertex.constraints, MaxVertexAtomsConstraint):
            max_atom_values.append(float(max_atom_constraint.size))
        max_atoms = min(max_atom_values)

        if n_atoms is not None and max_atoms < n_atoms:
            raise PacmanPartitionException(
                "Max size of {} is incompatible with fixed size of {}".format(
                    max_atoms, n_atoms))

        return n_atoms if n_atoms is not None else max_atoms
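
_get_ratio itself is not shown in this listing; based on the comment "if 0 is required, assume the ratio is the max available", a plausible sketch is:

# Assumed helper, reconstructed from the comment above; the real
# signature and behaviour may differ.
def _get_ratio(limit, requirement):
    if requirement == 0:
        return limit
    return limit / requirement
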
Example No. 19
    def _compute_atoms_per_core(self, vertex, res_tracker):
        """ Work out how many atoms per core are required for the given\
            vertex. Assumes that the first atom of the vertex is fully\
            representative.

        :rtype: float
        """
        # Get the usage of the first atom, then assume that this will be the
        # usage of all the atoms.
        requirements = vertex.get_resources_used_by_atoms(Slice(0, 1))

        # Locate the maximum resources available
        limits = res_tracker.get_maximum_constrained_resources_available(
            requirements, vertex.constraints)

        # Find the ratio of each of the resources - if 0 is required,
        # assume the ratio is the max available
        atoms_per_sdram = self._get_ratio(limits.sdram.get_value(),
                                          requirements.sdram.get_value())
        atoms_per_dtcm = self._get_ratio(limits.dtcm.get_value(),
                                         requirements.dtcm.get_value())
        atoms_per_cpu = self._get_ratio(limits.cpu_cycles.get_value(),
                                        requirements.cpu_cycles.get_value())

        n_atoms = None
        for fa_constraint in utility_calls.locate_constraints_of_type(
                vertex.constraints, FixedVertexAtomsConstraint):
            if n_atoms is not None and n_atoms != fa_constraint.size:
                raise PacmanPartitionException(
                    "Vertex has multiple contradictory fixed atom constraints"
                    " - cannot be both {} and {}".format(
                        n_atoms, fa_constraint.size))
            n_atoms = fa_constraint.size

        max_atom_values = [atoms_per_sdram, atoms_per_dtcm, atoms_per_cpu]
        for max_atom_constraint in utility_calls.locate_constraints_of_type(
                vertex.constraints, MaxVertexAtomsConstraint):
            max_atom_values.append(float(max_atom_constraint.size))
        max_atoms = min(max_atom_values)

        if n_atoms is not None and max_atoms < n_atoms:
            raise PacmanPartitionException(
                "Max size of {} is incompatible with fixed size of {}".format(
                    max_atoms, n_atoms))

        return n_atoms if n_atoms is not None else max_atoms
def _exists_equiv_vertex(x, y, graph, vertex_type):
    for vertex in graph.vertices:
        if isinstance(vertex, vertex_type) and any(
                constraint.x == x and constraint.y == y
                for constraint in locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint)):
            return vertex
    return None
Example No. 21
def _check_if_partition_has_continuous_keys(partition):
    """
    :param AbstractSingleSourcePartition partition:
    :rtype: bool
    """
    continuous_constraints = locate_constraints_of_type(
        partition.constraints, ContiguousKeyRangeContraint)
    # TODO: Can we do better here?
    return len(continuous_constraints) > 0
    def _determine_groups(self, machine_graph, graph_mapper, graph,
                          n_keys_map, progress):
        check_types_of_edge_constraint(machine_graph)

        for partition in progress.over(
                machine_graph.outgoing_edge_partitions, False):
            fixed_key_constraints = locate_constraints_of_type(
                partition.constraints, FixedKeyAndMaskConstraint)
            fixed_mask_constraints = locate_constraints_of_type(
                partition.constraints, FixedMaskConstraint)
            fixed_field_constraints = locate_constraints_of_type(
                partition.constraints, FixedKeyFieldConstraint)

            if (not fixed_key_constraints and
                    not fixed_mask_constraints and
                    not fixed_field_constraints):
                self.add_field_constraints(
                    partition, graph_mapper, graph, n_keys_map)
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(
                    vertex.constraints, AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(
            machine_graph.n_vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(
                max_connected_vertex, machine_graph, unconstrained,
                machine, placements, resource_tracker, progress)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
    def _run(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: A set of placements
        :rtype: ~pacman.model.placements.Placements
        :raise PacmanPlaceException:
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(vertex.constraints,
                                          AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements,
                               vertices_on_same_chip, machine_graph)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(max_connected_vertex,
                                               machine_graph, unconstrained,
                                               machine, placements,
                                               resource_tracker, progress,
                                               vertices_on_same_chip)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
    def _partition_vertex(
            self, vertex, subgraph, graph_to_subgraph_mapper, resource_tracker,
            graph):
        """ Partition a single vertex

        :param vertex: the vertex to partition
        :type vertex:\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :param subgraph: the partitioned_graph to add subverts to
        :type subgraph:\
                    :py:class:`pacman.model.partitioned_graph.partitioned_graph.Subgraph`
        :param graph_to_subgraph_mapper: the mappings object from\
                    partitionable_graph to partitioned_graph which needs to be\
                    updated with new subverts
        :type graph_to_subgraph_mapper:\
                    :py:class:`pacman.modelgraph_subgraph_mapper.graph_mapper.GraphMapper`
        :param resource_tracker: A tracker of assigned resources
        :type resource_tracker:\
                    :py:class:`pacman.utilities.resource_tracker.ResourceTracker`
        :param graph: the partitionable_graph object
        :type graph:\
                    :py:class:`pacman.model.graph.partitionable_graph.PartitionableGraph`
        :return: None
        :rtype: None
        :raise pacman.exceptions.PacmanPartitionException: if the extra vertex\
                    for partitioning identically has a different number of\
                    atoms than its counterpart.
        """

        partition_together_vertices = \
            self._locate_vertices_to_partition_now(vertex)

        # locate max atoms per core
        possible_max_atoms = list()
        possible_max_atoms.append(vertex.get_max_atoms_per_core())

        for other_partitionable_vertex in partition_together_vertices:
            max_atom_constraints =\
                utility_calls.locate_constraints_of_type(
                    other_partitionable_vertex.constraints,
                    PartitionerMaximumSizeConstraint)
            for constraint in max_atom_constraints:
                possible_max_atoms.append(constraint.size)
        max_atoms_per_core = min(possible_max_atoms)

        # partition by atoms
        self._partition_by_atoms(
            partition_together_vertices, vertex.n_atoms, max_atoms_per_core,
            subgraph, graph, graph_to_subgraph_mapper, resource_tracker)
Example No. 27
    def _sort_vertices_for_one_to_one_connection(self, partitioned_graph):
        """

        :param partitioned_graph: the partitioned graph of this application
        :return: list of sorted vertices
        """
        sorted_vertices = list()
        found_list = list()

        # order subverts based on constraint priority
        ordered_subverts = utility_calls.sort_objects_by_constraint_authority(
            partitioned_graph.subvertices)

        for vertex in ordered_subverts:
            incoming_edges = \
                partitioned_graph.incoming_subedges_from_subvertex(vertex)

            # do search if not already added and has incoming edges
            if vertex not in found_list and len(incoming_edges) != 0:
                chip_constraint = utility_calls.locate_constraints_of_type(
                    vertex.constraints, PlacerChipAndCoreConstraint)

                # if has constraint, add first then add incoming
                if len(chip_constraint) != 0:
                    one_to_one_incoming_edges = list()
                    one_to_one_incoming_edges.append(vertex)
                    sorted_vertices.append(one_to_one_incoming_edges)
                    found_list.append(vertex)
                    self.check_incoming_verts(one_to_one_incoming_edges,
                                              vertex, partitioned_graph,
                                              found_list)
                else:  # if no constraint add incoming then first
                    one_to_one_incoming_edges = list()
                    sorted_vertices.append(one_to_one_incoming_edges)
                    self.check_incoming_verts(one_to_one_incoming_edges,
                                              vertex, partitioned_graph,
                                              found_list)
                    one_to_one_incoming_edges.append(vertex)
                    found_list.append(vertex)

        # locate vertices which have no output or input, and add them for
        # placement
        for vertex in ordered_subverts:
            if vertex not in found_list:
                listed_vertex = list()
                listed_vertex.append(vertex)
                sorted_vertices.append(listed_vertex)
        return sorted_vertices
def is_contiguous_range(same_key_group):
    """ Determine if any edge in the group has a\
        :py:class:`pacman.model.constraints.key_allocator_contiguous_range_constraint.KeyAllocatorContiguousRangeContraint`

    :param same_key_group: Set of partitioned edges that are to be\
                assigned the same keys and masks
    :type same_key_group: iterable of\
                :py:class:`pacman.model.partitioned_graph.partitioned_edge.PartitionedEdge`
    :return: True if the range should be contiguous
    """
    for edge in same_key_group:
        constraints = utility_calls.locate_constraints_of_type(
            edge.constraints, KeyAllocatorContiguousRangeContraint)
        if len(constraints) > 0:
            return True
    return False
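
Since only the existence of a constraint matters here, the loop could equally be written with any(); a behaviour-equivalent sketch:

# Alternative formulation, illustration only (same imports as above).
def is_contiguous_range(same_key_group):
    return any(
        utility_calls.locate_constraints_of_type(
            edge.constraints, KeyAllocatorContiguousRangeContraint)
        for edge in same_key_group)
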
def _get_key_constraints(outgoing_partition):
    """ Locate the nengo key constraints of an outgoing partition.

    :param outgoing_partition: the outgoing partition to find constraints for
    :return: the nengo key constraints holder
    :raise Exception: if the pre-vertex has no nengo key constraints
    """
    if isinstance(
            outgoing_partition.pre_vertex, AbstractNengoMachineVertex):
        outgoing_partition_constraints = \
            outgoing_partition.pre_vertex.\
            get_outgoing_partition_constraints(outgoing_partition)
        return utility_calls.locate_constraints_of_type(
            constraints=outgoing_partition_constraints,
            constraint_type=NengoKeyConstraints)[0]
    raise Exception(
        "This outgoing partition has no nengo key constraints. "
        "Don't know how to handle this")
Example No. 31
    def _locate_connected_chip_data(vertex, machine):
        """ Finds the connected virtual chip

        :param vertex: the virtual vertex to find the connected chip of
        :param machine: the machine to search
        """
        # locate the chip from the placement constraint
        placement_constraints = utility_calls.locate_constraints_of_type(
            vertex.constraints, ChipAndCoreConstraint)
        routers = (
            machine.get_chip_at(constraint.x, constraint.y).router
            for constraint in placement_constraints)
        links = (
            router.get_link(i)
            for router in routers for i in range(6) if router.is_link(i))
        link = next(iter(links), None)
        if link is None:
            raise PacmanConfigurationException(
                "Can't find the real chip this virtual chip is connected to."
                "Please fix and try again.")
        return ([link.destination_x, link.destination_y],
                link.multicast_default_from)
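
The next(iter(links), None) idiom above pulls the first available link out of the lazily evaluated generator chain, or yields None when no router link exists. A standalone illustration:

# Demo of the first-or-None idiom used above.
links = (i for i in range(6) if i >= 3)
assert next(iter(links), None) == 3
empty = (i for i in range(0))
assert next(iter(empty), None) is None
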
def get_fixed_mask(same_key_group):
    """ Get a fixed mask from a group of partitioned edges if a\
        :py:class:`pacman.model.constraints.key_allocator_same_key_constraint.KeyAllocatorFixedMaskConstraint`\
        constraint exists in any of the edges in the group.

    :param same_key_group: Set of partitioned edges that are to be\
                assigned the same keys and masks
    :type same_key_group: iterable of\
                :py:class:`pacman.model.partitioned_graph.partitioned_edge.PartitionedEdge`
    :return: The fixed mask if found, or None
    :raise PacmanValueError: If two edges conflict in their requirements
    """
    mask = None
    fields = None
    edge_with_mask = None
    for edge in same_key_group:
        fixed_mask_constraints = utility_calls.locate_constraints_of_type(
            edge.constraints, KeyAllocatorFixedMaskConstraint)
        for fixed_mask_constraint in fixed_mask_constraints:
            if mask is not None and mask != fixed_mask_constraint.mask:
                raise PacmanValueError(
                    "Two Partitioned Edges {} and {} must have the same"
                    " key and mask, but have different fixed masks,"
                    " {} and {}".format(edge, edge_with_mask, mask,
                                        fixed_mask_constraint.mask))
            if (fields is not None and
                    fixed_mask_constraint.fields is not None and
                    fields != fixed_mask_constraint.fields):
                raise PacmanValueError(
                    "Two Partitioned Edges {} and {} must have the same"
                    " key and mask, but have different field ranges"
                    .format(edge, edge_with_mask))
            mask = fixed_mask_constraint.mask
            edge_with_mask = edge
            if fixed_mask_constraint.fields is not None:
                fields = fixed_mask_constraint.fields

    return mask, fields
Example No. 33
    def __call__(self, machine_graph, machine):

        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(vertex.constraints,
                                          AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(max_connected_vertex,
                                               machine_graph, unconstrained,
                                               machine, placements,
                                               resource_tracker, progress)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
def get_fixed_mask(same_key_group):
    """ Get a fixed mask from a group of edges if a\
        :py:class:`pacman.model.constraints.key_allocator_constraints.FixedMaskConstraint`\
        constraint exists in any of the edges in the group.

    :param same_key_group: \
        Set of edges that are to be assigned the same keys and masks
    :type same_key_group: \
        iterable(:py:class:`pacman.model.graphs.machine.MachineEdge`)
    :return: The fixed mask if found, or None
    :raise PacmanValueError: If two edges conflict in their requirements
    """
    mask = None
    fields = None
    edge_with_mask = None
    for edge in same_key_group:
        fixed_mask_constraints = locate_constraints_of_type(
            edge.constraints, FixedMaskConstraint)
        for fixed_mask_constraint in fixed_mask_constraints:
            if mask is not None and mask != fixed_mask_constraint.mask:
                raise PacmanValueError(
                    "Two Edges {} and {} must have the same"
                    " key and mask, but have different fixed masks,"
                    " {} and {}".format(edge, edge_with_mask, mask,
                                        fixed_mask_constraint.mask))
            if (fields is not None and fixed_mask_constraint.fields is not None
                    and fields != fixed_mask_constraint.fields):
                raise PacmanValueError(
                    "Two Edges {} and {} must have the same"
                    " key and mask, but have different field ranges".format(
                        edge, edge_with_mask))
            mask = fixed_mask_constraint.mask
            edge_with_mask = edge
            if fixed_mask_constraint.fields is not None:
                fields = fixed_mask_constraint.fields

    return mask, fields
    def _extract_keys_and_masks_from_bit_field(
            self, partition, bit_field_space, n_keys_map, seen_mask_instances):
        """ Take the bit field space and a partition and locate the keys and\
            masks for that partition given all its constraints

        :param partition: the partition to extract keys and masks for
        :param bit_field_space: the bit field space
        :param n_keys_map: the edge to n_keys map
        :param seen_mask_instances: a count of how many fixed mask instances\
            have been seen, so that it can increment the routing part of the\
            fixed mask fields.
        :return: the routing keys and masks for the partition and the number\
            of seen fixed masks instances
        """
        routing_keys_and_masks = list()
        application_keys_and_masks = list()
        fixed_key_constraints = locate_constraints_of_type(
            partition.constraints, FixedKeyAndMaskConstraint)
        fixed_mask_constraints = locate_constraints_of_type(
            partition.constraints, FixedMaskConstraint)
        fixed_field_constraints = locate_constraints_of_type(
            partition.constraints, FixedKeyFieldConstraint)
        flexi_field_constraints = locate_constraints_of_type(
            partition.constraints, FlexiKeyFieldConstraint)
        continuous_constraints = locate_constraints_of_type(
            partition.constraints, ContiguousKeyRangeContraint)

        if fixed_key_constraints:
            fixed_keys_fields = \
                self._field_mapper[fixed_key_constraints[0].keys_and_masks[0]]

            # tracker for iterator fields
            range_based_fixed_key_fields = list()

            # add the app field (must exist in this situation)
            inputs = dict()
            inputs[APPLICATION_DIVIDER_FIELD_NAME] = \
                self._fixed_key_application_field_value

            # handle the inputs for static parts of the mask
            for fixed_key_field in fixed_keys_fields:
                app_field_space = bit_field_space(**inputs)
                tag = list(app_field_space.get_tags(
                    str(fixed_key_field.name)))[0]
                if tag == SUPPORTED_TAGS.ROUTING.name:
                    inputs[str(fixed_key_field.name)] = fixed_key_field.value
                elif tag == SUPPORTED_TAGS.APPLICATION.name:
                    range_based_fixed_key_fields.append(fixed_key_field)
                else:
                    raise exceptions.PacmanConfigurationException(
                        "Don't know this tag field, sorry")

            if len(range_based_fixed_key_fields) > 1:
                raise exceptions.PacmanConfigurationException(
                    "Multiple fixed key fields are not supported")

            # get n keys from n_keys_map for the range based mask part
            n_keys = n_keys_map.n_keys_for_partition(partition)

            # generate keys
            for key_index in range(n_keys):
                inputs[str(range_based_fixed_key_fields[0].name)] = key_index
                # routing keys and masks
                routing_key = bit_field_space(**inputs).get_value(
                    tag=SUPPORTED_TAGS.ROUTING.name)
                routing_mask = bit_field_space(**inputs).get_mask(
                    tag=SUPPORTED_TAGS.ROUTING.name)
                routing_keys_and_masks.append(BaseKeyAndMask(routing_key,
                                                             routing_mask))

                # application keys and masks
                application_key = bit_field_space(**inputs).get_value(
                    tag=SUPPORTED_TAGS.APPLICATION.name)
                application_mask = bit_field_space(**inputs).get_mask(
                    tag=SUPPORTED_TAGS.APPLICATION.name)
                application_keys_and_masks.append(BaseKeyAndMask(
                    application_key, application_mask))
        elif fixed_mask_constraints:

            # get constraint and its fields
            fixed_mask_constraint_mask = fixed_mask_constraints[0].mask
            fixed_mask_fields = self._field_mapper[fixed_mask_constraint_mask]

            # tracker for iterator fields
            range_based_fixed_mask_fields = list()

            # add the app field (must exist in this situation)
            inputs = dict()
            inputs[APPLICATION_DIVIDER_FIELD_NAME] = \
                self._fixed_mask_application_field_value

            # handle the inputs for static parts of the mask
            for fixed_mask_field in fixed_mask_fields:
                app_field_space = bit_field_space(**inputs)
                tag = list(app_field_space.get_tags(str(fixed_mask_field)))[0]
                if tag == SUPPORTED_TAGS.ROUTING.name:
                    inputs[str(fixed_mask_field)] = seen_mask_instances
                elif tag == SUPPORTED_TAGS.APPLICATION.name:
                    range_based_fixed_mask_fields.append(fixed_mask_field)
                else:
                    raise exceptions.PacmanConfigurationException(
                        "I don't recognise this tag. sorry")

            if len(range_based_fixed_mask_fields) > 1:
                raise exceptions.PacmanConfigurationException(
                    "Multiple fixed mask fields are not supported")

            # get n keys from n_keys_map for the range based mask part
            n_keys = n_keys_map.n_keys_for_partition(partition)

            # generate keys
            for key_index in range(n_keys):
                inputs[str(range_based_fixed_mask_fields[0])] = key_index
                # routing keys and masks
                routing_key = bit_field_space(**inputs).get_value(
                    tag=SUPPORTED_TAGS.ROUTING.name)
                routing_mask = bit_field_space(**inputs).get_mask(
                    tag=SUPPORTED_TAGS.ROUTING.name)
                routing_keys_and_masks.append(BaseKeyAndMask(routing_key,
                                                             routing_mask))

                # application keys and masks
                application_key = bit_field_space(**inputs).get_value(
                    tag=SUPPORTED_TAGS.APPLICATION.name)
                application_mask = bit_field_space(**inputs).get_mask(
                    tag=SUPPORTED_TAGS.APPLICATION.name)
                application_keys_and_masks.append(BaseKeyAndMask(
                    application_key, application_mask))

            # update the seen mask instances so the next one gets a new key
            seen_mask_instances += 1

        elif fixed_field_constraints:
            # TODO: need to fill this out
            raise exceptions.PacmanConfigurationException(
                "Fixed field constraints are not supported")

        elif flexi_field_constraints:
            inputs = dict()

            # if there's an application field, add its value
            if self._flexi_field_application_field_values:
                inputs[APPLICATION_DIVIDER_FIELD_NAME] = \
                    self._flexi_field_application_field_values[
                        flexi_field_constraints[0].fields[0].name]

            # collect flexible fields by group
            range_based_flexi_fields = list()
            for field in flexi_field_constraints[0].fields:
                if field.value is not None:
                    inputs[field.name] = field.value
                else:
                    range_based_flexi_fields.append(field)

            # if the set contains a range-based flexible field, do a search
            if range_based_flexi_fields:
                routing_keys_and_masks, application_keys_and_masks = \
                    self._handle_set_of_flexi_range_fields(
                        range_based_flexi_fields, bit_field_space,
                        routing_keys_and_masks, application_keys_and_masks,
                        inputs, 0)

            else:  # no range-based field; just get the key and mask
                key = bit_field_space(**inputs).get_value()
                mask = bit_field_space(**inputs).get_mask()
                routing_keys_and_masks.append(BaseKeyAndMask(key, mask))

        # if there's a continuous constraint, check that the keys are
        # continuous
        if continuous_constraints:
            if not self._check_keys_are_continuous(
                    application_keys_and_masks):
                raise exceptions.PacmanConfigurationException(
                    "The keys returned from the bit field are not "
                    "continuous and therefore cannot be used")

            # if continuous, we only need the first key, so drop the rest
            routing_keys_and_masks = routing_keys_and_masks[0:1]

        # return keys and masks
        return routing_keys_and_masks, seen_mask_instances
def _check_if_partition_has_continuous_keys(partition):
    continuous_constraints = utility_calls.locate_constraints_of_type(
        partition.constraints, ContiguousKeyRangeContraint)
    # TODO: Can we do better here?
    return len(continuous_constraints) > 0
    def _do_allocation(
            self, one_to_one_groups, same_chip_vertex_groups,
            machine, plan_n_timesteps, machine_graph, progress):
        """

        :param one_to_one_groups:
            Groups of vertexes that would be nice on same chip
        :type one_to_one_groups:
            list(set(vertex))
        :param same_chip_vertex_groups:
            Mapping of Vertex to the Vertex that must be on the same Chip
        :type same_chip_vertex_groups:
            dict(vertex, collection(vertex))
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param progress:
        :return:
        """

        placements = Placements()

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # RadialPlacementFromChipConstraint won't work here
        for vertex in machine_graph.vertices:
            for constraint in vertex.constraints:
                if isinstance(constraint, RadialPlacementFromChipConstraint):
                    raise PacmanPlaceException(
                        "A RadialPlacementFromChipConstraint will not work "
                        "with the OneToOnePlacer algorithm; use the "
                        "RadialPlacer algorithm instead")

        unconstrained = list()
        # Find and place vertices with hard constraints
        for vertex in machine_graph.vertices:
            if isinstance(vertex, AbstractVirtualVertex):
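                # virtual vertices sit on their virtual chip; take the first
                # processor index not already occupied there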
                virtual_p = 0
                while placements.is_processor_occupied(
                        vertex.virtual_chip_x, vertex.virtual_chip_y,
                        virtual_p):
                    virtual_p += 1
                placements.add_placement(Placement(
                    vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                    virtual_p))
                all_vertices_placed.add(vertex)
            elif locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint):
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups,
                    all_vertices_placed, progress)
            else:
                unconstrained.append(vertex)

        for grouped_vertices in one_to_one_groups:
            # Get unallocated vertices and placements of allocated vertices
            unallocated = list()
            chips = list()
            for vert in grouped_vertices:
                if vert in all_vertices_placed:
                    placement = placements.get_placement_of_vertex(vert)
                    chips.append((placement.x, placement.y))
                else:
                    unallocated.append(vert)

            if 0 < len(unallocated) <=\
                    resource_tracker.get_maximum_cores_available_on_a_chip():
                # Try to allocate all vertices to the same chip
                self._allocate_one_to_one_group(
                    resource_tracker, unallocated, progress, placements, chips,
                    all_vertices_placed)
            # if too big or failed go on to other groups first

        # check all vertices have been placed; if not, place them now
        for vertex in machine_graph.vertices:
            if vertex not in all_vertices_placed:
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed,
                    progress)

        progress.end()
        return placements
    def __call__(self, subgraph, n_keys_map, graph_mapper=None):

        # check that this algorithm supports the constraints
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=subgraph.partitions,
            supported_constraints=[
                KeyAllocatorFixedMaskConstraint,
                KeyAllocatorFixedKeyAndMaskConstraint,
                KeyAllocatorContiguousRangeContraint
            ],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # verify that no partition has more than one of each constraint type,
        # and that the constraints are compatible
        routing_info_allocator_utilities.\
            check_types_of_edge_constraint(subgraph)

        routing_infos = RoutingInfo()

        # Get the partitioned edges grouped by those that require the same key
        (fixed_key_groups, fixed_mask_groups, fixed_field_groups,
         flexi_field_groups, continuous_groups, none_continuous_groups) = \
            routing_info_allocator_utilities.get_edge_groups(subgraph)

        # non-continuous groups are allocated contiguously anyway, so treat
        # them as continuous
        for group in none_continuous_groups:
            continuous_groups.add(group)

        # Go through the groups and allocate keys
        progress_bar = ProgressBar(len(subgraph.partitions),
                                   "Allocating routing keys")

        # allocate the groups that have fixed keys
        for group in fixed_key_groups:  # fixed keys groups

            # Get any fixed keys and masks from the group and attempt to
            # allocate them
            fixed_mask = None
            fixed_key_and_mask_constraint = \
                utility_calls.locate_constraints_of_type(
                    group.constraints,
                    KeyAllocatorFixedKeyAndMaskConstraint)[0]

            # attempt to allocate them
            self._allocate_fixed_keys_and_masks(
                fixed_key_and_mask_constraint.keys_and_masks, fixed_mask)

            # update the pacman data objects
            self._update_routing_objects(
                fixed_key_and_mask_constraint.keys_and_masks, routing_infos,
                group)

            continuous_groups.remove(group)

            progress_bar.update()

        for group in fixed_mask_groups:  # fixed mask groups

            # get mask and fields if need be
            fixed_mask = utility_calls.locate_constraints_of_type(
                group.constraints, KeyAllocatorFixedMaskConstraint)[0].mask

            fields = None
            if group in fixed_field_groups:
                fields = utility_calls.locate_constraints_of_type(
                    group.constraints,
                    KeyAllocatorFixedFieldConstraint)[0].fields
                fixed_field_groups.remove(group)

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                fixed_mask, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)

            continuous_groups.remove(group)

            progress_bar.update()

        for group in fixed_field_groups:
            fields = utility_calls.locate_constraints_of_type(
                group.constraints, KeyAllocatorFixedFieldConstraint)[0].fields

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                None, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)

            continuous_groups.remove(group)

            progress_bar.update()

        if len(flexi_field_groups) != 0:
            raise exceptions.PacmanConfigurationException(
                "MallocBasedRoutingInfoAllocator does not support FlexiField")

        # If there is a graph, group by source vertex and sort by vertex slice
        # (lo_atom)
        if graph_mapper is not None:
            vertex_groups = defaultdict(list)
            for partition in continuous_groups:
                vertex = graph_mapper.get_vertex_from_subvertex(
                    partition.edges[0].pre_subvertex)
                vertex_groups[vertex].append(partition)
            vertex_partitions = list()
            for vertex_group in vertex_groups.itervalues():
                sorted_partitions = sorted(
                    vertex_group,
                    key=lambda part: graph_mapper.get_subvertex_slice(
                        part.edges[0].pre_subvertex))
                vertex_partitions.extend(sorted_partitions)
            continuous_groups = vertex_partitions

        for group in continuous_groups:
            keys_and_masks = self._allocate_keys_and_masks(
                None, None, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)

        progress_bar.end()
        return {'routing_infos': routing_infos}
    def __call__(self, partitioned_graph, file_path):
        """

        :param partitioned_graph:
        :param folder_path:
        :return:
        """
        progress_bar = ProgressBar(len(partitioned_graph.subvertices),
                                   "Converting to json partitioned graph")
        # write basic stuff
        json_graph_dictory_rep = dict()

        # write vertices data
        vertices_resources = dict()
        json_graph_dictory_rep["vertices_resources"] = vertices_resources

        edges_resources = defaultdict()
        json_graph_dictory_rep["edges"] = edges_resources

        for vertex in partitioned_graph.subvertices:

            # handle external devices
            if isinstance(vertex, AbstractVirtualVertex):
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = 0

            # handle tagged vertices
            elif len(utility_calls.locate_constraints_of_type(
                    vertex.constraints, AbstractTagAllocatorConstraint)) != 0:

                # handle the edge between the tag-able vertex and the fake
                # vertex
                hyper_edge_dict = dict()
                edges_resources[hashlib.md5(vertex.label).hexdigest()] = \
                    hyper_edge_dict
                hyper_edge_dict["source"] = str(id(vertex))
                hyper_edge_dict["sinks"] = \
                    [hashlib.md5(vertex.label).hexdigest()]
                hyper_edge_dict["weight"] = 1.0
                hyper_edge_dict["type"] = "FAKE_TAG_EDGE"

                # add the tag-able vertex
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = DEFAULT_NOUMBER_OF_CORES_USED_PER_PARTITIONED_VERTEX
                vertex_resources["sdram"] = int(vertex.resources_required.sdram.get_value())

                # add fake vertex
                vertex_resources = dict()
                vertices_resources[hashlib.md5(
                    vertex.label).hexdigest()] = vertex_resources
                vertex_resources["cores"] = 0
                vertex_resources["sdram"] = 0

            # handle standard vertices
            else:
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = DEFAULT_NOUMBER_OF_CORES_USED_PER_PARTITIONED_VERTEX
                vertex_resources["sdram"] = int(vertex.resources_required.sdram.get_value())
            vertex_outgoing_partitions = partitioned_graph.outgoing_edges_partitions_from_vertex(vertex)

            # handle the vertex edges
            for vertex_partition in vertex_outgoing_partitions:
                hyper_edge_dict = dict()
                edges_resources["{}:{}".format(id(vertex), vertex_partition)] = hyper_edge_dict
                hyper_edge_dict["source"] = str(id(vertex))

                sinks_string = []
                for edge in vertex_outgoing_partitions[vertex_partition].edges:
                    sinks_string.append(str(id(edge.post_subvertex)))
                hyper_edge_dict["sinks"] = sinks_string
                hyper_edge_dict["weight"] = 1.0
                hyper_edge_dict["type"] = vertex_outgoing_partitions[vertex_partition].type.name.lower()
            progress_bar.update()

        file_to_write = open(file_path, "w")
        json.dump(json_graph_dictory_rep, file_to_write)
        file_to_write.close()

        # validate the schema
        partitioned_graph_schema_file_path = os.path.join(
            os.path.dirname(file_format_schemas.__file__), "partitioned_graph.json"
        )
        with open(partitioned_graph_schema_file_path, "r") as file_to_read:
            partitioned_graph_schema = json.load(file_to_read)
        jsonschema.validate(json_graph_dictory_rep, partitioned_graph_schema)

        progress_bar.end()

        return {"partitioned_graph": file_path}
def _check_if_partition_has_continuous_keys(partition):
    continuous_constraints =\
        utility_calls.locate_constraints_of_type(
            partition.constraints, KeyAllocatorContiguousRangeContraint)
    return len(continuous_constraints) > 0
def get_edge_groups(machine_graph, traffic_type):
    """ Utility method to get groups of edges using any\
        :py:class:`pacman.model.constraints.key_allocator_constraints.KeyAllocatorSameKeyConstraint`\
        constraints.  Note that no checking is done here about conflicts\
        related to other constraints.

    :param machine_graph: the machine graph
    :param traffic_type: the traffic type to group
    """

    # mapping between partition and shared key group it is in
    partition_groups = dict()

    # process each partition one by one, merging any partitions that are
    # constrained to share a key into a single group
    for vertex in machine_graph.vertices:
        for partition in machine_graph.\
                get_outgoing_edge_partitions_starting_at_vertex(vertex):

            # only process partitions of the correct traffic type
            if partition.traffic_type == traffic_type:

                # Get a set of partitions that should be grouped together
                shared_key_constraints = locate_constraints_of_type(
                    partition.constraints, ShareKeyConstraint)
                partitions_to_group = [partition]
                for constraint in shared_key_constraints:
                    partitions_to_group.extend(constraint.other_partitions)

                # Get a set of groups that should be grouped
                groups_to_group = [
                    partition_groups.get(part_to_group, [part_to_group])
                    for part_to_group in partitions_to_group
                ]

                # Group the groups
                new_group = ConstraintGroup(part for group in groups_to_group
                                            for part in group)
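                # map every partition in the merged set to the new group, so
                # later iterations pick up the whole group at once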
                partition_groups.update(
                    {part: new_group
                     for part in new_group})

    # Keep track of groups
    fixed_key_groups = list()
    shared_key_groups = list()
    fixed_mask_groups = list()
    fixed_field_groups = list()
    flexi_field_groups = list()
    continuous_groups = list()
    noncontinuous_groups = list()
    groups_by_type = {
        FixedKeyAndMaskConstraint: fixed_key_groups,
        FixedMaskConstraint: fixed_mask_groups,
        FixedKeyFieldConstraint: fixed_field_groups,
        FlexiKeyFieldConstraint: flexi_field_groups,
    }
    groups = set(itervalues(partition_groups))
    for group in groups:

        # Get all expected constraints in the group
        constraints = [
            constraint for partition in group
            for constraint in locate_constraints_of_type(
                partition.constraints, (FixedKeyAndMaskConstraint,
                                        FixedMaskConstraint,
                                        FlexiKeyFieldConstraint,
                                        FixedKeyFieldConstraint))
        ]

        # Check that the possibly conflicting constraints are equal
        if constraints and not all(constraint_a == constraint_b
                                   for constraint_a in constraints
                                   for constraint_b in constraints):
            raise PacmanRouteInfoAllocationException(
                "The group of partitions {} has conflicting constraints".
                format(constraints))

        # If no constraints, must be one of the non-specific groups
        if not constraints:
            # If the group has only one item, it is not shared
            if len(group) == 1:
                continuous_constraints = [
                    constraint for partition in group
                    for constraint in locate_constraints_of_type(
                        partition.constraints, ContiguousKeyRangeContraint)
                ]
                if continuous_constraints:
                    continuous_groups.append(group)
                else:
                    noncontinuous_groups.append(group)

            # If the group has more than one partition, it must be shared
            else:
                shared_key_groups.append(group)

        # If constraints found, put the group in the appropriate constraint
        # group
        else:
            group._set_constraint(constraints[0])
            constraint_type = type(constraints[0])
            groups_by_type[constraint_type].append(group)

    # return the set of groups
    return (fixed_key_groups, shared_key_groups, fixed_mask_groups,
            fixed_field_groups, flexi_field_groups, continuous_groups,
            noncontinuous_groups)
def check_types_of_edge_constraint(machine_graph):
    """ Go through the graph for operations and checks that the constraints\
        are compatible.

    :param machine_graph: the graph to search through
    :rtype: None:
    """
    for partition in machine_graph.outgoing_edge_partitions:
        fixed_key = locate_constraints_of_type(partition.constraints,
                                               FixedKeyAndMaskConstraint)

        fixed_mask = locate_constraints_of_type(partition.constraints,
                                                FixedMaskConstraint)

        fixed_field = locate_constraints_of_type(partition.constraints,
                                                 FixedKeyFieldConstraint)

        flexi_field = locate_constraints_of_type(partition.constraints,
                                                 FlexiKeyFieldConstraint)

        if (len(fixed_key) > 1 or len(fixed_field) > 1 or len(fixed_mask) > 1
                or len(flexi_field) > 1):
            raise PacmanConfigurationException(
                "There is more than one of the same constraint type on "
                "the partition {} starting at {}. Please fix and try again.".
                format(partition.identifier, partition.pre_vertex))

        fixed_key = len(fixed_key) == 1
        fixed_mask = len(fixed_mask) == 1
        fixed_field = len(fixed_field) == 1
        flexi_field = len(flexi_field) == 1

        # check for fixed key and a fixed mask. as these should have been
        # merged before now
        if fixed_key and fixed_mask:
            raise PacmanConfigurationException(
                "The partition {} starting at {} has both a fixed key and a "
                "fixed mask constraint. These should have been merged "
                "before now, so this is treated as an error".format(
                    partition.identifier, partition.pre_vertex))

        # check for a fixed key and fixed field, as these are incompatible
        if fixed_key and fixed_field:
            raise PacmanConfigurationException(
                "The partition {} starting at {} has both a fixed key and a "
                "fixed field constraint. These might be mergeable, but this "
                "is treated as an error".format(
                    partition.identifier, partition.pre_vertex))

        # check that a fixed mask and fixed field have compatible masks
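        # (e.g. a fixed mask of 0xFFFF0000 accepts field masks 0xFF000000 and
        # 0x00FF0000, which lie inside it and do not overlap, but rejects
        # 0x0000FF00, which falls outside the fixed mask)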
        if fixed_mask and fixed_field:
            _check_masks_are_correct(partition)

        # a flexible field combined with any fixed constraint is an error
        if flexi_field and (fixed_mask or fixed_key or fixed_field):
            raise PacmanConfigurationException(
                "The partition {} starting at {} has a flexible field and "
                "another fixed constraint. These might be mergeable, but "
                "this is treated as an error".format(
                    partition.identifier, partition.pre_vertex))
def get_edge_groups(machine_graph, traffic_type):
    """ Utility method to get groups of edges using any\
        :py:class:`~pacman.model.constraints.key_allocator_constraints.KeyAllocatorSameKeyConstraint`\
        constraints.  Note that no checking is done here about conflicts\
        related to other constraints.

    :param machine_graph: the machine graph
    :param traffic_type: the traffic type to group
    """

    # mapping between partition and shared key group it is in
    partition_groups = OrderedDict()

    # process each partition one by one, merging any partitions that are
    # constrained to share a key into a single group
    for vertex in machine_graph.vertices:
        for partition in machine_graph.\
                get_outgoing_edge_partitions_starting_at_vertex(vertex):

            # only process partitions of the correct traffic type
            if partition.traffic_type == traffic_type:

                # Get a set of partitions that should be grouped together
                shared_key_constraints = locate_constraints_of_type(
                    partition.constraints, ShareKeyConstraint)
                partitions_to_group = [partition]
                for constraint in shared_key_constraints:
                    partitions_to_group.extend(constraint.other_partitions)

                # Get a set of groups that should be grouped
                groups_to_group = [
                    partition_groups.get(part_to_group, [part_to_group])
                    for part_to_group in partitions_to_group]

                # Group the groups
                new_group = ConstraintGroup(
                    part for group in groups_to_group for part in group)
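                # map every partition in the merged set to the new group, so
                # later iterations pick up the whole group at once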
                partition_groups.update(
                    {part: new_group for part in new_group})

    # Keep track of groups
    fixed_key_groups = list()
    shared_key_groups = list()
    fixed_mask_groups = list()
    fixed_field_groups = list()
    flexi_field_groups = list()
    continuous_groups = list()
    noncontinuous_groups = list()
    groups_by_type = {
        FixedKeyAndMaskConstraint: fixed_key_groups,
        FixedMaskConstraint: fixed_mask_groups,
        FixedKeyFieldConstraint: fixed_field_groups,
        FlexiKeyFieldConstraint: flexi_field_groups,
    }
    groups = OrderedSet(itervalues(partition_groups))
    for group in groups:

        # Get all expected constraints in the group
        constraints = [
            constraint for partition in group
            for constraint in locate_constraints_of_type(
                partition.constraints,
                (FixedKeyAndMaskConstraint, FixedMaskConstraint,
                 FlexiKeyFieldConstraint, FixedKeyFieldConstraint))]

        # Check that the possibly conflicting constraints are equal
        if constraints and not all(
                constraint_a == constraint_b for constraint_a in constraints
                for constraint_b in constraints):
            raise PacmanRouteInfoAllocationException(
                "The group of partitions {} has conflicting constraints"
                .format(constraints))

        # If no constraints, must be one of the non-specific groups
        if not constraints:
            # If the group has only one item, it is not shared
            if len(group) == 1:
                continuous_constraints = [
                    constraint for partition in group
                    for constraint in locate_constraints_of_type(
                        partition.constraints, ContiguousKeyRangeContraint)]
                if continuous_constraints:
                    continuous_groups.append(group)
                else:
                    noncontinuous_groups.append(group)

            # If the group has more than one partition, it must be shared
            else:
                shared_key_groups.append(group)

        # If constraints found, put the group in the appropriate constraint
        # group
        else:
            group._set_constraint(constraints[0])
            constraint_type = type(constraints[0])
            groups_by_type[constraint_type].append(group)

    # return the set of groups
    return (fixed_key_groups, shared_key_groups,
            fixed_mask_groups, fixed_field_groups, flexi_field_groups,
            continuous_groups, noncontinuous_groups)
    def __call__(self, partitioned_graph, machine):

        # check that the algorithm can handle the constraints
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=partitioned_graph.subvertices,
            supported_constraints=[
                PlacerRadialPlacementFromChipConstraint,
                TagAllocatorRequireIptagConstraint,
                TagAllocatorRequireReverseIptagConstraint,
                PlacerChipAndCoreConstraint],
            abstract_constraint_type=AbstractPlacerConstraint)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained_vertices = list()
        unconstrained_vertices = set()
        for subvertex in partitioned_graph.subvertices:
            placement_constraints = utility_calls.locate_constraints_of_type(
                subvertex.constraints, AbstractPlacerConstraint)
            if len(placement_constraints) > 0:
                constrained_vertices.append(subvertex)
            else:
                unconstrained_vertices.add(subvertex)

        # Iterate over constrained vertices and generate placements
        progress_bar = ProgressBar(len(partitioned_graph.subvertices),
                                   "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        for vertex in constrained_vertices:
            self._place_vertex(vertex, resource_tracker, machine, placements)
            progress_bar.update()

        while len(unconstrained_vertices) > 0:

            # Keep track of all vertices connected to the currently placed ones
            next_vertices = set()

            # Initially, add the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained_vertices, partitioned_graph)
            next_vertices.add(max_connected_vertex)

            while len(next_vertices) > 0:

                # Find the vertex most connected to the currently placed
                # vertices
                vertex = self._find_max_connected_vertex(next_vertices,
                                                         partitioned_graph)

                # Place the vertex
                self._place_vertex(vertex, resource_tracker, machine,
                                   placements)
                progress_bar.update()
                unconstrained_vertices.remove(vertex)
                next_vertices.remove(vertex)

                # Add all vertices connected to this one to the set
                for in_edge in (partitioned_graph
                                .incoming_subedges_from_subvertex(vertex)):
                    if in_edge.pre_subvertex in unconstrained_vertices:
                        next_vertices.add(in_edge.pre_subvertex)
                for out_edge in (partitioned_graph
                                 .outgoing_subedges_from_subvertex(vertex)):
                    if out_edge.post_subvertex in unconstrained_vertices:
                        next_vertices.add(out_edge.post_subvertex)
        # finished, so stop progress bar and return placements
        progress_bar.end()
        return {'placements': placements}
    def _do_allocation(
            self, one_to_one_groups, same_chip_vertex_groups,
            machine, plan_n_timesteps, machine_graph, progress):
        """
        :param list(set(MachineVertex)) one_to_one_groups:
            Groups of vertices that are preferred to be on the same chip
        :param same_chip_vertex_groups:
            Mapping of each vertex to the vertices that must be on the same
            chip
        :type same_chip_vertex_groups:
            dict(MachineVertex, collection(MachineVertex))
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to place the machine graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_utilities.progress_bar.ProgressBar progress:
        :rtype: Placements
        """

        placements = Placements()

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # RadialPlacementFromChipConstraint won't work here
        for vertex in machine_graph.vertices:
            for constraint in vertex.constraints:
                if isinstance(constraint, RadialPlacementFromChipConstraint):
                    raise PacmanPlaceException(
                        "A RadialPlacementFromChipConstraint will not work "
                        "with the OneToOnePlacer algorithm; use the "
                        "RadialPlacer algorithm instead")

        # Find and place vertices with hard constraints
        for vertex in machine_graph.vertices:
            if isinstance(vertex, AbstractVirtual):
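                # virtual vertices sit on their virtual chip; take the first
                # processor index not already occupied there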
                virtual_p = 0
                while placements.is_processor_occupied(
                        vertex.virtual_chip_x, vertex.virtual_chip_y,
                        virtual_p):
                    virtual_p += 1
                placements.add_placement(Placement(
                    vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                    virtual_p))
                all_vertices_placed.add(vertex)
            elif locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint):
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed, progress,
                    machine_graph)

        for grouped_vertices in one_to_one_groups:
            # Get unallocated vertices and placements of allocated vertices
            unallocated = list()
            chips = list()
            for vert in grouped_vertices:
                if vert in all_vertices_placed:
                    placement = placements.get_placement_of_vertex(vert)
                    chips.append((placement.x, placement.y))
                else:
                    unallocated.append(vert)
            if not chips:
                chips = None

            if 0 < len(unallocated) <=\
                    resource_tracker.get_maximum_cores_available_on_a_chip():
                # Try to allocate all vertices to the same chip
                self._allocate_one_to_one_group(
                    resource_tracker, unallocated, progress, placements, chips,
                    all_vertices_placed, machine_graph)
            # if too big or failed go on to other groups first

        # check all vertices have been placed; if not, place them now
        for vertex in machine_graph.vertices:
            if vertex not in all_vertices_placed:
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed,
                    progress, machine_graph)

        progress.end()
        return placements
    def __call__(self, graph, machine):

        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            supported_constraints=[PartitionerMaximumSizeConstraint],
            abstract_constraint_type=AbstractPartitionerConstraint)

        # start progress bar
        progress_bar = ProgressBar(len(graph.vertices),
                                   "Partitioning graph vertices")
        vertices = graph.vertices
        subgraph = PartitionedGraph(label="partitioned_graph for partitionable"
                                          "_graph {}".format(graph.label))
        graph_to_subgraph_mapper = GraphMapper(graph.label, subgraph.label)
        resource_tracker = ResourceTracker(machine)

        # Partition one vertex at a time
        for vertex in vertices:

            # Get the usage of the first atom, then assume that this
            # will be the usage of all the atoms
            requirements = vertex.get_resources_used_by_atoms(Slice(0, 1),
                                                              graph)

            # Locate the maximum resources available
            max_resources_available = \
                resource_tracker.get_maximum_constrained_resources_available(
                    vertex.constraints)

            # Find the ratio of each of the resources - if 0 is required,
            # assume the ratio is the max available
            atoms_per_sdram = self._get_ratio(
                max_resources_available.sdram.get_value(),
                requirements.sdram.get_value())
            atoms_per_dtcm = self._get_ratio(
                max_resources_available.dtcm.get_value(),
                requirements.dtcm.get_value())
            atoms_per_cpu = self._get_ratio(
                max_resources_available.cpu.get_value(),
                requirements.cpu.get_value())

            max_atom_values = [atoms_per_sdram, atoms_per_dtcm, atoms_per_cpu]

            max_atoms_constraints = utility_calls.locate_constraints_of_type(
                vertex.constraints, PartitionerMaximumSizeConstraint)
            for max_atom_constraint in max_atoms_constraints:
                max_atom_values.append(max_atom_constraint.size)

            atoms_per_core = min(max_atom_values)

            # Partition into subvertices
            counted = 0
            while counted < vertex.n_atoms:

                # Determine subvertex size
                remaining = vertex.n_atoms - counted
                if remaining > atoms_per_core:
                    alloc = atoms_per_core
                else:
                    alloc = remaining

                # Create and store new subvertex, and increment elements
                #  counted
                if counted < 0 or counted + alloc - 1 < 0:
                    raise PacmanPartitionException("Not enough resources"
                                                   " available to create"
                                                   " subvertex")

                vertex_slice = Slice(counted, counted + (alloc - 1))
                subvertex_usage = vertex.get_resources_used_by_atoms(
                    vertex_slice, graph)

                subvert = vertex.create_subvertex(
                    vertex_slice, subvertex_usage,
                    "{}:{}:{}".format(vertex.label, counted,
                                      (counted + (alloc - 1))),
                    partition_algorithm_utilities.
                    get_remaining_constraints(vertex))
                subgraph.add_subvertex(subvert)
                graph_to_subgraph_mapper.add_subvertex(
                    subvert, vertex_slice, vertex)
                counted = counted + alloc

                # update allocated resources
                resource_tracker.allocate_constrained_resources(
                    subvertex_usage, vertex.constraints)

            # update and end progress bars as needed
            progress_bar.update()
        progress_bar.end()

        partition_algorithm_utilities.generate_sub_edges(
            subgraph, graph_to_subgraph_mapper, graph)

        return {'Partitioned_graph': subgraph,
                'Graph_mapper': graph_to_subgraph_mapper}
    def __call__(self, machine_graph, n_keys_map, routing_tables):
        # check that this algorithm supports the constraints
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint,
                FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # verify that no partition has more than one of each constraint type,
        # and that the constraints are compatible
        check_types_of_edge_constraint(machine_graph)

        routing_infos = RoutingInfo()

        # Get the edges grouped by those that require the same key
        (fixed_keys, _shared_keys, fixed_masks, fixed_fields, flexi_fields,
         continuous, noncontinuous) = \
            get_edge_groups(machine_graph, EdgeTrafficType.MULTICAST)
        if flexi_fields:
            raise PacmanConfigurationException(
                "MallocBasedRoutingInfoAllocator does not support FlexiField")

        # non-continuous groups are allocated contiguously anyway, so treat
        # them as continuous
        for group in noncontinuous:
            continuous.add(group)

        # Go through the groups and allocate keys
        progress = ProgressBar(
            machine_graph.n_outgoing_edge_partitions,
            "Allocating routing keys")

        # allocate the groups that have fixed keys
        for group in progress.over(fixed_keys, False):
            # Get any fixed keys and masks from the group and attempt to
            # allocate them
            fixed_mask = None
            fixed_key_and_mask_constraint = locate_constraints_of_type(
                group.constraints, FixedKeyAndMaskConstraint)[0]

            # attempt to allocate them
            self._allocate_fixed_keys_and_masks(
                fixed_key_and_mask_constraint.keys_and_masks, fixed_mask)

            # update the pacman data objects
            self._update_routing_objects(
                fixed_key_and_mask_constraint.keys_and_masks, routing_infos,
                group)
            continuous.remove(group)

        for group in progress.over(fixed_masks, False):
            # get mask and fields if need be
            fixed_mask = locate_constraints_of_type(
                group.constraints, FixedMaskConstraint)[0].mask

            fields = None
            if group in fixed_fields:
                fields = locate_constraints_of_type(
                    group.constraints, FixedKeyFieldConstraint)[0].fields
                fixed_fields.remove(group)

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                fixed_mask, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        for group in progress.over(fixed_fields, False):
            fields = locate_constraints_of_type(
                group.constraints, FixedKeyFieldConstraint)[0].fields

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                None, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        # Sort the rest of the groups, using the routing tables for guidance
        # Group partitions by those which share routes in any table
        partition_groups = OrderedDict()
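        # visit the routers with the most table entries first; the assumption
        # is that the busiest routers expose the largest shared-route groups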
        routers = reversed(sorted(
            routing_tables.get_routers(),
            key=lambda item: len(routing_tables.get_entries_for_router(
                item[0], item[1]))))
        for x, y in routers:

            # Find all partitions that share a route in this table
            partitions_by_route = defaultdict(OrderedSet)
            routing_table = routing_tables.get_entries_for_router(x, y)
            for partition, entry in iteritems(routing_table):
                if partition in continuous:
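                    # pack the route into one integer: bits 0-5 hold the six
                    # link ids and bits 6 upward the processor ids, so two
                    # entries hash equal exactly when they share a route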
                    entry_hash = sum(
                        1 << i
                        for i in entry.link_ids)
                    entry_hash += sum(
                        1 << (i + 6)
                        for i in entry.processor_ids)
                    partitions_by_route[entry_hash].add(partition)

            for entry_hash, partitions in iteritems(partitions_by_route):
                found_groups = list()
                for partition in partitions:
                    if partition in partition_groups:
                        found_groups.append(partition_groups[partition])

                if not found_groups:
                    # If no group was found, create a new one
                    for partition in partitions:
                        partition_groups[partition] = partitions

                elif len(found_groups) == 1:
                    # If a single other group was found, merge it
                    for partition in partitions:
                        found_groups[0].add(partition)
                        partition_groups[partition] = found_groups[0]

                else:
                    # Merge the groups
                    new_group = partitions
                    for group in found_groups:
                        for partition in group:
                            new_group.add(partition)
                    for partition in new_group:
                        partition_groups[partition] = new_group

        # Sort partitions by largest group
        continuous = list(OrderedSet(
            tuple(group) for group in itervalues(partition_groups)))

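        # allocate the largest route-sharing groups first; the assumption is
        # that they benefit most from early, contiguous key allocation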
        for group in reversed(sorted(continuous, key=len)):
            for partition in progress.over(group, False):
                keys_and_masks = self._allocate_keys_and_masks(
                    None, None, n_keys_map.n_keys_for_partition(partition))

                # update the pacman data objects
                self._update_routing_objects(
                    keys_and_masks, routing_infos, partition)

        progress.end()
        return routing_infos
    def __call__(self, machine, placements):
        """ see AbstractTagAllocatorAlgorithm.allocate_tags
        """

        resource_tracker = ResourceTracker(machine)

        # Check that the algorithm can handle the constraints
        progress_bar = ProgressBar(placements.n_placements,
                                   "Allocating tags")
        placements_with_tags = list()
        for placement in placements.placements:
            utility_calls.check_algorithm_can_support_constraints(
                constrained_vertices=[placement.subvertex],
                supported_constraints=[
                    TagAllocatorRequireIptagConstraint,
                    TagAllocatorRequireReverseIptagConstraint
                ],
                abstract_constraint_type=AbstractTagAllocatorConstraint)
            if len(utility_calls.locate_constraints_of_type(
                    placement.subvertex.constraints,
                    AbstractTagAllocatorConstraint)):
                placements_with_tags.append(placement)
            progress_bar.update()

        # Go through and allocate the tags
        tags = Tags()
        for placement in placements_with_tags:
            vertex = placement.subvertex

            # Get the constraint details for the tags
            (board_address, ip_tags, reverse_ip_tags) =\
                utility_calls.get_ip_tag_info(vertex.constraints)

            # Allocate the tags, first-come, first-served, using the
            # fixed placement of the vertex, and the required resources
            chips = [(placement.x, placement.y)]
            resources = vertex.resources_required
            (_, _, _, returned_ip_tags, returned_reverse_ip_tags) = \
                resource_tracker.allocate_resources(
                    resources, chips, placement.p, board_address, ip_tags,
                    reverse_ip_tags)

            # Put the allocated ip tag information into the tag object
            if returned_ip_tags is not None:
                for (tag_constraint, (board_address, tag)) in zip(
                        ip_tags, returned_ip_tags):
                    ip_tag = IPTag(
                        board_address, tag, tag_constraint.ip_address,
                        tag_constraint.port, tag_constraint.strip_sdp)
                    tags.add_ip_tag(ip_tag, vertex)

            # Put the allocated reverse ip tag information into the tag object
            if returned_reverse_ip_tags is not None:
                for (tag_constraint, (board_address, tag)) in zip(
                        reverse_ip_tags, returned_reverse_ip_tags):
                    reverse_ip_tag = ReverseIPTag(
                        board_address, tag, tag_constraint.port, placement.x,
                        placement.y, placement.p, tag_constraint.sdp_port)
                    tags.add_reverse_ip_tag(reverse_ip_tag, vertex)

        progress_bar.end()
        return {'tags': tags}
    def _partition_vertex(
            self, vertex, plan_n_timesteps, machine_graph, graph_mapper,
            resource_tracker, progress, vertex_groups):
        """ Partition a single vertex

        :param vertex: the vertex to partition
        :type vertex:\
            :py:class:`pacman.model.graphs.application.ApplicationVertex`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :param machine_graph: the graph to add vertices to
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param graph_mapper: the mappings between graphs
        :type graph_mapper:\
            :py:class:`pacman.model.graphs.common.GraphMapper`
        :param resource_tracker: A tracker of assigned resources
        :type resource_tracker:\
            :py:class:`pacman.utilities.ResourceTracker`
        :param progress: The progress bar
        :param vertex_groups: Groups together vertices that are supposed to\
            be the same size
        :rtype: None
        :raise pacman.exceptions.PacmanPartitionException: \
            if the extra vertex for partitioning identically has a different\
            number of atoms than its counterpart.
        """
        partition_together_vertices = list(vertex_groups[vertex])

        # locate max atoms per core and fixed atoms per core
        possible_max_atoms = list()
        n_atoms = None
        for other_vertex in partition_together_vertices:
            if isinstance(other_vertex, ApplicationVertex):
                possible_max_atoms.append(
                    other_vertex.get_max_atoms_per_core())
            max_atom_constraints = utils.locate_constraints_of_type(
                other_vertex.constraints, MaxVertexAtomsConstraint)
            for constraint in max_atom_constraints:
                possible_max_atoms.append(constraint.size)
            n_atom_constraints = utils.locate_constraints_of_type(
                other_vertex.constraints, FixedVertexAtomsConstraint)
            for constraint in n_atom_constraints:
                if n_atoms is not None and constraint.size != n_atoms:
                    raise PacmanPartitionException(
                        "Vertex has multiple contradictory fixed atom "
                        "constraints - cannot be both {} and {}".format(
                            n_atoms, constraint.size))
                n_atoms = constraint.size

        max_atoms_per_core = int(min(possible_max_atoms))
        if n_atoms is not None and max_atoms_per_core < n_atoms:
            raise PacmanPartitionException(
                "Max size of {} is incompatible with fixed size of {}".format(
                    max_atoms_per_core, n_atoms))
        if n_atoms is not None:
            max_atoms_per_core = n_atoms
            if vertex.n_atoms % n_atoms != 0:
                raise PacmanPartitionException(
                    "Vertex of {} atoms cannot be divided into units of {}"
                    .format(vertex.n_atoms, n_atoms))
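        # (e.g. 100 atoms with FixedVertexAtomsConstraint(10) partitions into
        # ten machine vertices of 10 atoms each; a fixed size of 8 would
        # raise above, since 100 is not divisible by 8)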

        # partition by atoms
        self._partition_by_atoms(
            partition_together_vertices, plan_n_timesteps, vertex.n_atoms,
            max_atoms_per_core, machine_graph, graph_mapper, resource_tracker,
            progress, n_atoms is not None)
    def __call__(self, partitioned_graph, file_path):
        """

        :param partitioned_graph:
        :param folder_path:
        :return:
        """
        progress_bar = ProgressBar(len(partitioned_graph.subvertices),
                                   "Converting to json partitioned graph")
        # write basic stuff
        json_graph_dictory_rep = dict()

        # write vertices data
        vertices_resources = dict()
        json_graph_dictory_rep["vertices_resources"] = vertices_resources

        edges_resources = defaultdict()
        json_graph_dictory_rep["edges"] = edges_resources

        for vertex in partitioned_graph.subvertices:

            # handle external devices
            if isinstance(vertex, AbstractVirtualVertex):
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = 0

            # handle tagged vertices
            elif len(
                    utility_calls.locate_constraints_of_type(
                        vertex.constraints,
                        AbstractTagAllocatorConstraint)) != 0:

                # handle the edge between the tag-able vertex and the fake
                # vertex
                hyper_edge_dict = dict()
                edges_resources[hashlib.md5(vertex.label).hexdigest()] = \
                    hyper_edge_dict
                hyper_edge_dict["source"] = str(id(vertex))
                hyper_edge_dict['sinks'] = \
                    [hashlib.md5(vertex.label).hexdigest()]
                hyper_edge_dict["weight"] = 1.0
                hyper_edge_dict["type"] = "FAKE_TAG_EDGE"

                # add the tag-able vertex
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = \
                    DEFAULT_NOUMBER_OF_CORES_USED_PER_PARTITIONED_VERTEX
                vertex_resources["sdram"] = \
                    int(vertex.resources_required.sdram.get_value())

                # add fake vertex
                vertex_resources = dict()
                vertices_resources[hashlib.md5(
                    vertex.label).hexdigest()] = vertex_resources
                vertex_resources["cores"] = 0
                vertex_resources["sdram"] = 0

            # handle standard vertices
            else:
                vertex_resources = dict()
                vertices_resources[id(vertex)] = vertex_resources
                vertex_resources["cores"] = \
                    DEFAULT_NOUMBER_OF_CORES_USED_PER_PARTITIONED_VERTEX
                vertex_resources["sdram"] = \
                    int(vertex.resources_required.sdram.get_value())

            vertex_outgoing_partitions = \
                partitioned_graph.outgoing_edges_partitions_from_vertex(vertex)

            # handle the vertex edges
            for partition_id in vertex_outgoing_partitions:
                partition = vertex_outgoing_partitions[partition_id]
                hyper_edge_dict = dict()
                edges_resources[str(id(partition))] = hyper_edge_dict
                hyper_edge_dict["source"] = str(id(vertex))

                sinks_string = []
                weight = 0

                for edge in partition.edges:
                    sinks_string.append(str(id(edge.post_subvertex)))
                    weight += edge.weight
                hyper_edge_dict['sinks'] = sinks_string
                hyper_edge_dict["weight"] = weight
                hyper_edge_dict["type"] = partition.type.name.lower()
            progress_bar.update()

        file_to_write = open(file_path, "w")
        json.dump(json_graph_dictory_rep, file_to_write)
        file_to_write.close()

        # validate the schema
        partitioned_graph_schema_file_path = os.path.join(
            os.path.dirname(file_format_schemas.__file__),
            "partitioned_graph.json")
        file_to_read = open(partitioned_graph_schema_file_path, "r")
        partitioned_graph_schema = json.load(file_to_read)
        jsonschema.validate(json_graph_dictory_rep, partitioned_graph_schema)

        progress_bar.end()

        return {"partitioned_graph": file_path}


def check_types_of_edge_constraint(machine_graph):
    """ Go through the outgoing edge partitions of the graph and check\
        that the constraints on each partition are compatible.

    :param MachineGraph machine_graph: the graph to search through
    :rtype: None
    """
    for partition in machine_graph.outgoing_edge_partitions:
        fixed_key = locate_constraints_of_type(
            partition.constraints, FixedKeyAndMaskConstraint)

        fixed_mask = locate_constraints_of_type(
            partition.constraints, FixedMaskConstraint)

        fixed_field = locate_constraints_of_type(
            partition.constraints, FixedKeyFieldConstraint)

        flexi_field = locate_constraints_of_type(
            partition.constraints, FlexiKeyFieldConstraint)

        if (len(fixed_key) > 1 or len(fixed_field) > 1 or
                len(fixed_mask) > 1 or len(flexi_field) > 1):
            raise PacmanConfigurationException(
                "There is more than one constraint of the same type on "
                "the partition {} starting at {}. Please fix and try again."
                .format(partition.identifier, partition.pre_vertex))

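        # reduce each list to a boolean recording whether that constraint
        # type is present on this partition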
        fixed_key = len(fixed_key) == 1
        fixed_mask = len(fixed_mask) == 1
        fixed_field = len(fixed_field) == 1
        flexi_field = len(flexi_field) == 1

        # check for a fixed key and a fixed mask, as these should have
        # been merged before now
        if fixed_key and fixed_mask:
            raise PacmanConfigurationException(
                "The partition {} starting at {} has a fixed key and fixed "
                "mask constraint. These can be merged together, but this "
                "is deemed an error here"
                .format(partition.identifier, partition.pre_vertex))

        # check for a fixed key and fixed field, as these are incompatible
        if fixed_key and fixed_field:
            raise PacmanConfigurationException(
                "The partition {} starting at {} has a fixed key and fixed "
                "field constraint. These may be mergeable, but this is "
                "deemed an error here"
                .format(partition.identifier, partition.pre_vertex))

        # check that a fixed mask and fixed field have compatible masks
        if fixed_mask and fixed_field:
            _check_masks_are_correct(partition)

        # if there is a flexible field alongside any fixed constraint,
        # throw an error
        if flexi_field and (fixed_mask or fixed_key or fixed_field):
            raise PacmanConfigurationException(
                "The partition {} starting at {} has a flexible field and "
                "another fixed constraint. These may be mergeable, but "
                "this is deemed an error here"
                .format(partition.identifier, partition.pre_vertex))
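
# A worked example (illustrative values, not from the source) of the
# mask-compatibility rule that _check_masks_are_correct enforces when a
# partition has both a FixedMaskConstraint and a FixedKeyFieldConstraint:
# each field mask must lie inside the fixed mask, and the field masks
# must not overlap each other.
#
#     mask = 0xFFFF0000
#     field_a = 0xFF000000   # inside the mask: field_a & mask == field_a
#     field_b = 0x00FF0000   # inside the mask, disjoint from field_a
#     assert field_a & mask == field_a
#     assert field_b & mask == field_b
#     assert field_a & field_b == 0
#     # a field mask of 0x0000FF00 would be rejected: it has bits set
#     # outside 0xFFFF0000, so PacmanInvalidParameterException is raised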
    def __call__(self, machine_graph, n_keys_map, routing_tables):
        """
        :param MachineGraph machine_graph:
        :param AbstractMachinePartitionNKeysMap n_keys_map:
        :param MulticastRoutingTableByPartition routing_tables:
        :rtype: RoutingInfo
        """
        # check that this algorithm supports the constraints
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint, FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint
            ],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # verify that no partition has more than one constraint of the
        # same type, and that the constraints are compatible
        check_types_of_edge_constraint(machine_graph)

        routing_infos = RoutingInfo()

        # Get the edges grouped by those that require the same key
        (fixed_keys, _shared_keys, fixed_masks, fixed_fields, continuous,
         noncontinuous) = get_mulitcast_edge_groups(machine_graph)

        # Even groups without a continuous key constraint will be
        # allocated contiguous key ranges
        continuous.extend(noncontinuous)

        # Go through the groups and allocate keys
        progress = ProgressBar(machine_graph.n_outgoing_edge_partitions,
                               "Allocating routing keys")

        # allocate the groups that have fixed keys
        for group in progress.over(fixed_keys, False):
            # get the fixed key-and-mask constraint from the group
            fixed_mask = None
            fixed_key_and_mask_constraint = locate_constraints_of_type(
                group.constraints, FixedKeyAndMaskConstraint)[0]

            # attempt to allocate them
            self._allocate_fixed_keys_and_masks(
                fixed_key_and_mask_constraint.keys_and_masks, fixed_mask)

            # update the pacman data objects
            self._update_routing_objects(
                fixed_key_and_mask_constraint.keys_and_masks, routing_infos,
                group)
            continuous.remove(group)

        for group in progress.over(fixed_masks, False):
            # get the mask, and the fields if present
            fixed_mask = locate_constraints_of_type(
                group.constraints, FixedMaskConstraint)[0].mask

            fields = None
            if group in fixed_fields:
                fields = locate_constraints_of_type(
                    group.constraints, FixedKeyFieldConstraint)[0].fields
                fixed_fields.remove(group)

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                fixed_mask, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        for group in progress.over(fixed_fields, False):
            fields = locate_constraints_of_type(
                group.constraints, FixedKeyFieldConstraint)[0].fields

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                None, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        # Sort the rest of the groups, using the routing tables for guidance
        # Group partitions by those which share routes in any table
        partition_groups = OrderedDict()
        routers = sorted(
            routing_tables.get_routers(),
            key=lambda item: len(
                routing_tables.get_entries_for_router(item[0], item[1])),
            reverse=True)
        for x, y in routers:

            # Find all partitions that share a route in this table
            partitions_by_route = defaultdict(OrderedSet)
            routing_table = routing_tables.get_entries_for_router(x, y)
            for partition, entry in iteritems(routing_table):
                if partition in continuous:
                    entry_hash = sum(1 << i for i in entry.link_ids)
                    entry_hash += sum(1 << (i + 6)
                                      for i in entry.processor_ids)
                    partitions_by_route[entry_hash].add(partition)

            for entry_hash, partitions in iteritems(partitions_by_route):
                found_groups = list()
                for partition in partitions:
                    if partition in partition_groups:
                        found_groups.append(partition_groups[partition])

                if not found_groups:
                    # If no group was found, create a new one
                    for partition in partitions:
                        partition_groups[partition] = partitions

                elif len(found_groups) == 1:
                    # If a single other group was found, merge it
                    for partition in partitions:
                        found_groups[0].add(partition)
                        partition_groups[partition] = found_groups[0]

                else:
                    # Merge the groups
                    new_group = partitions
                    for group in found_groups:
                        for partition in group:
                            new_group.add(partition)
                    for partition in new_group:
                        partition_groups[partition] = new_group
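
        # Worked example of the route hash above (illustrative values):
        # links occupy bits 0-5 and processors bits 6 and up, so an entry
        # routed out of links {0, 2} and processors {1, 3} hashes to
        #     (1 << 0) + (1 << 2) + (1 << (1 + 6)) + (1 << (3 + 6))
        #   = 1 + 4 + 128 + 512 = 645
        # Partitions with equal hashes leave a router by exactly the same
        # links and processors, so their keys can be grouped.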

        # Deduplicate the groups; the largest are allocated first below
        continuous = list(
            OrderedSet(tuple(group) for group in itervalues(partition_groups)))

        for group in sorted(continuous, key=len, reverse=True):
            for partition in progress.over(group, False):
                keys_and_masks = self._allocate_keys_and_masks(
                    None, None, n_keys_map.n_keys_for_partition(partition))

                # update the pacman data objects
                self._update_routing_objects(keys_and_masks, routing_infos,
                                             partition)

        progress.end()
        return routing_infos
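
# A minimal usage sketch (hypothetical wiring, not from the source):
# assuming the __call__ above belongs to an allocator class, here given
# the made-up name RoutingInfoAllocator, a mapping pipeline would invoke
# it with the outputs of earlier stages:
#
#     allocator = RoutingInfoAllocator()
#     routing_infos = allocator(machine_graph, n_keys_map, routing_tables)
#     # routing_infos is the RoutingInfo with a key and mask allocated
#     # to every outgoing edge partition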