Example #1
    def __call__(self, machine_graph, machine):
        """ Place a machine_graph so that each vertex is placed on a core

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine: The machine on which to place the machine_graph
        :type machine: :py:class:`spinn_machine.Machine`
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(machine)
        for vertex in progress.over(vertices):
            # Create and store a new placement anywhere on the board
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, None)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)
        return placements
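
The Placements object this example returns is a simple container that can be built and queried directly. Below is a minimal, hedged sketch of that container API; the import paths and the SimpleMachineVertex stand-in are assumptions and are not part of the example above.

    from pacman.model.graphs.machine import SimpleMachineVertex
    from pacman.model.placements import Placement, Placements
    from pacman.model.resources import ResourceContainer, ConstantSDRAM

    # A stand-in vertex; a real one would come from a partitioned MachineGraph.
    vertex = SimpleMachineVertex(ResourceContainer(sdram=ConstantSDRAM(1024)))

    placements = Placements()
    placements.add_placement(Placement(vertex, 0, 0, 1))   # chip (0, 0), core 1

    placement = placements.get_placement_of_vertex(vertex)
    print(placement.x, placement.y, placement.p)            # -> 0 0 1
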
Example #2
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place each vertex in a machine graph on a core in the machine.

        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine: A SpiNNaker machine object.
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: Placements of vertices on the machine
        :rtype: Placements
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_random_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in vertices_placed:
                vertices_placed.update(
                    self._place_vertex(vertex, resource_tracker, machine,
                                       placements, vertices_on_same_chip))
        return placements
Example #3
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place a machine_graph so that each vertex is placed on a core

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(machine, plan_n_timesteps)
        for vertex in progress.over(vertices):
            # Create and store a new placement anywhere on the board
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, None)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)
        return placements
Example #4
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place each vertex in a machine graph on a core in the machine.

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine: A SpiNNaker machine object.
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :return: Placements of vertices on the machine
        :rtype: :py:class:`pacman.model.placements.Placements`
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_random_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in vertices_placed:
                vertices_placed.update(self._place_vertex(
                    vertex, resource_tracker, machine, placements,
                    vertices_on_same_chip))
        return placements
Example #5
def __gather_placements_with_tags(placement, collector):
    """
    :param Placement placement:
    :param list(Placement) collector:
    """
    requires = placement.vertex.resources_required
    if requires.iptags or requires.reverse_iptags:
        ResourceTracker.check_constraints([placement.vertex])
        collector.append(placement)
Example #6
    def _check_constraints(
            self, vertices, additional_placement_constraints=None):
        if additional_placement_constraints is not None:
            placement_constraints = additional_placement_constraints
        else:
            placement_constraints = {}
        ResourceTracker.check_constraints(
            vertices, additional_placement_constraints=placement_constraints)
Example #7
    def _check_constraints(
            self, vertices, additional_placement_constraints=None):
        placement_constraints = {
            RadialPlacementFromChipConstraint, SameChipAsConstraint
        }
        if additional_placement_constraints is not None:
            placement_constraints.update(additional_placement_constraints)
        ResourceTracker.check_constraints(
            vertices, additional_placement_constraints=placement_constraints)
Example #8
    def _check_constraints(
            self, vertices, additional_placement_constraints=None):
        placement_constraints = {
            RadialPlacementFromChipConstraint, SameChipAsConstraint
        }
        if additional_placement_constraints is not None:
            placement_constraints.update(additional_placement_constraints)
        ResourceTracker.check_constraints(
            vertices, additional_placement_constraints=placement_constraints)
Example #9
    def test_n_cores_available(self):
        machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
        chip = machine.get_chip_at(0, 0)
        preallocated_resources = PreAllocatedResourceContainer(
            specific_core_resources=[
                SpecificCoreResource(chip=chip, cores=[1])
            ],
            core_resources=[CoreResource(chip=chip, n_cores=2)])
        tracker = ResourceTracker(
            machine,
            plan_n_timesteps=None,
            preallocated_resources=preallocated_resources)

        # Should be 14 cores = 18 - 1 monitor - 1 specific core - 2 other cores
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 14)

        # Should be 0 since the core is already pre allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 1), 0)

        # Should be 1 since the core is not pre allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 2), 1)

        # Should be 0 since the core is monitor
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 0), 0)

        # Allocate a core
        tracker._allocate_core(chip, (0, 0), 2)

        # Should be 13 cores as one now allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 13)
Example #10
    def test_allocate_resources_when_chip_used(self):
        router = Router([])
        sdram = SDRAM()
        empty_chip = Chip(
            0, 0, [], router, sdram, 0, 0, "127.0.0.1",
            virtual=False, tag_ids=[1])
        machine = Machine([empty_chip], 0, 0)
        resource_tracker = ResourceTracker(machine, plan_n_timesteps=None)
        with self.assertRaises(PacmanValueError):
            resource_tracker.allocate_resources(
                ResourceContainer(sdram=ConstantSDRAM(1024)))
Example #11
    def test_deallocation_of_resources(self):
        machine = VirtualMachine(
            width=2, height=2, n_cpus_per_chip=18, with_monitors=True)
        chip_sdram = machine.get_chip_at(1, 1).sdram.size
        res_sdram = 12345

        tracker = ResourceTracker(machine, plan_n_timesteps=None,
                                  preallocated_resources=None)

        sdram_res = ConstantSDRAM(res_sdram)
        resources = ResourceContainer(sdram=sdram_res)
        chip_0 = machine.get_chip_at(0, 0)

        # verify core tracker is empty
        if (0, 0) in tracker._core_tracker:
            raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))

        # allocate some res
        chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
            tracker.allocate_resources(resources, [(0, 0)])

        # verify chips used is updated
        cores = list(tracker._core_tracker[(0, 0)])
        self.assertEqual(len(cores), chip_0.n_user_processors - 1)

        # verify sdram used is updated
        sdram = tracker._sdram_tracker[(0, 0)]
        self.assertEqual(sdram, chip_sdram-res_sdram)

        if (0, 0) not in tracker._chips_used:
            raise Exception("should exist")

        # deallocate res
        tracker.unallocate_resources(
            chip_x, chip_y, processor_id, resources, ip_tags, reverse_ip_tags)

        # verify chips used is updated
        if ((0, 0) in tracker._core_tracker and
                len(tracker._core_tracker[(0, 0)]) !=
                chip_0.n_user_processors):
            raise Exception("shouldn't exist or should be right size")

        if (0, 0) in tracker._chips_used:
            raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))
Example #12
    def test_deallocation_of_resources(self):
        machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
        chip_sdram = machine.get_chip_at(1, 1).sdram.size
        res_sdram = 12345

        tracker = ResourceTracker(machine,
                                  plan_n_timesteps=None,
                                  preallocated_resources=None)

        sdram_res = ConstantSDRAM(res_sdram)
        resources = ResourceContainer(sdram=sdram_res)
        chip_0 = machine.get_chip_at(0, 0)

        # verify core tracker is empty
        if (0, 0) in tracker._core_tracker:
            raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))

        # allocate some res
        chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
            tracker.allocate_resources(resources, [(0, 0)])

        # verify chips used is updated
        cores = list(tracker._core_tracker[(0, 0)])
        self.assertEqual(len(cores), chip_0.n_user_processors - 1)

        # verify sdram used is updated
        sdram = tracker._sdram_tracker[(0, 0)]
        self.assertEqual(sdram, chip_sdram - res_sdram)

        if (0, 0) not in tracker._chips_used:
            raise Exception("should exist")

        # deallocate res
        tracker.unallocate_resources(chip_x, chip_y, processor_id, resources,
                                     ip_tags, reverse_ip_tags)

        # verify chips used is updated
        if ((0, 0) in tracker._core_tracker and len(
                tracker._core_tracker[(0, 0)]) != chip_0.n_user_processors):
            raise Exception("shouldn't exist or should be right size")

        if (0, 0) in tracker._chips_used:
            raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))
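
The allocate/unallocate round trip that this test verifies through private attributes can be sketched with just the public calls used above. The import paths below are assumptions and may differ between PACMAN versions.

    from spinn_machine import virtual_machine
    from pacman.model.resources import ResourceContainer, ConstantSDRAM
    from pacman.utilities.utility_objs import ResourceTracker

    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    tracker = ResourceTracker(machine, plan_n_timesteps=None)
    resources = ResourceContainer(sdram=ConstantSDRAM(12345))

    # Ask for the resources on chip (0, 0); the tracker reports where they landed.
    chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
        tracker.allocate_resources(resources, [(0, 0)])

    # Give everything back, restoring the tracker to its initial state.
    tracker.unallocate_resources(
        chip_x, chip_y, processor_id, resources, ip_tags, reverse_ip_tags)
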
Example #13
    def test_n_cores_available(self):
        machine = VirtualMachine(
            width=2, height=2, n_cpus_per_chip=18, with_monitors=True)
        chip = machine.get_chip_at(0, 0)
        preallocated_resources = PreAllocatedResourceContainer(
            specific_core_resources=[
                SpecificCoreResource(chip=chip, cores=[1])],
            core_resources=[
                CoreResource(chip=chip, n_cores=2)])
        tracker = ResourceTracker(
            machine, plan_n_timesteps=None,
            preallocated_resources=preallocated_resources)

        # Should be 14 cores = 18 - 1 monitor - 1 specific core - 2 other cores
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 14)

        # Should be 0 since the core is already pre allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 1), 0)

        # Should be 1 since the core is not pre allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 2), 1)

        # Should be 0 since the core is monitor
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 0), 0)

        # Allocate a core
        tracker._allocate_core(chip, (0, 0), 2)

        # Should be 13 cores as one now allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 13)
Example #14
    def __call__(self,
                 nengo_operator_graph,
                 machine,
                 nengo_random_number_generator,
                 pre_allocated_resources=None):
        machine_graph = MachineGraph(label=constants.MACHINE_GRAPH_LABEL)
        graph_mapper = GraphMapper()

        self._resource_tracker = ResourceTracker(
            machine, preallocated_resources=pre_allocated_resources)

        progress_bar = ProgressBar(
            total_number_of_things_to_do=(
                len(nengo_operator_graph.vertices) +
                len(nengo_operator_graph.outgoing_edge_partitions)),
            string_describing_what_being_progressed="partitioning")

        # convert application vertices into machine vertices
        for operator in progress_bar.over(nengo_operator_graph.vertices,
                                          False):

            # create the machine verts
            operator.create_machine_vertices(self._resource_tracker,
                                             machine_graph, graph_mapper)

        self._handle_edges(nengo_operator_graph, machine_graph, graph_mapper,
                           progress_bar, nengo_random_number_generator)

        return machine_graph, graph_mapper, self._resource_tracker.chips_used
Example #15
    def __call__(self, machine, placements):
        """ See :py:meth:`AbstractTagAllocatorAlgorithm.allocate_tags`
        """

        resource_tracker = ResourceTracker(machine)

        # Keep track of ports allocated to reverse IP tags and tags that still
        # need a port to be allocated
        ports_to_allocate = dict()
        tags_to_allocate_ports = list()

        # Check that the algorithm can handle the constraints
        progress = ProgressBar(placements.n_placements, "Discovering tags")
        placements_with_tags = list()
        for placement in progress.over(placements.placements):
            self._gather_placements_with_tags(placement, placements_with_tags)

        # Go through and allocate the IP tags and constrained reverse IP tags
        tags = Tags()
        progress = ProgressBar(placements_with_tags, "Allocating tags")
        for placement in progress.over(placements_with_tags):
            self._allocate_tags_for_placement(placement, resource_tracker,
                                              tags, ports_to_allocate,
                                              tags_to_allocate_ports)

        # Finally allocate ports to the unconstrained reverse IP tags
        self._allocate_ports_for_reverse_ip_tags(tags_to_allocate_ports,
                                                 ports_to_allocate, tags)

        return list(tags.ip_tags), list(tags.reverse_ip_tags), tags
Example #16
    def _check_constraints(
            self, vertices, additional_placement_constraints=None):
        """ Ensure that the algorithm conforms to any required constraints.

        :param list(AbstractVertex) vertices:
            The vertices for which to check the constraints
        :param set(AbstractPlacerConstraint) additional_placement_constraints:
            Additional placement constraints supported by the algorithm doing
            this check
        """

        placement_constraints = {SameChipAsConstraint}
        if additional_placement_constraints is not None:
            placement_constraints.update(additional_placement_constraints)
        ResourceTracker.check_constraints(
            vertices, additional_placement_constraints=placement_constraints)
Example #17
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine: A SpiNNaker machine object.
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: Placements of vertices on the machine
        :rtype: Placements
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(
            machine_graph.vertices,
            additional_placement_constraints={SameChipAsConstraint})

        # in order to test isomorphism include:
        # placements_copy = Placements()
        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        progress = ProgressBar(
            machine_graph.n_vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_hilbert_chips(machine))

        # get vertices which must be placed on the same chip
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)

        # iterate over vertices and generate placements
        all_vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in all_vertices_placed:
                vertices_placed = self._place_vertex(
                    vertex, resource_tracker, machine,
                    placements, vertices_on_same_chip)
                all_vertices_placed.update(vertices_placed)
        return placements
Example #18
    def __setup_objects(self, app_graph, machine, plan_n_time_steps,
                        pre_allocated_resources):
        """ sets up the machine_graph, resource_tracker, vertices, \
            progress bar.

        :param ApplicationGraph app_graph: app graph
        :param ~spinn_machine.Machine machine: machine
        :param int plan_n_time_steps: the number of time steps to run for.
        :param pre_allocated_resources: pre allocated res from other systems.
        :type pre_allocated_resources: PreAllocatedResourceContainer or None
        :return: (machine graph, res tracker, verts, progress bar)
        :rtype: tuple(MachineGraph, ResourceTracker, list(ApplicationVertex),
            ~.ProgressBar)
        """
        # Load the vertices and create the machine_graph to fill
        machine_graph = MachineGraph(label="partitioned graph for {}".format(
            app_graph.label),
                                     application_graph=app_graph)

        resource_tracker = ResourceTracker(
            machine,
            plan_n_time_steps,
            preallocated_resources=pre_allocated_resources)

        # sort out vertices by placement constraints
        vertices = sort_vertices_by_known_constraints(app_graph.vertices)

        # Group vertices that are supposed to be the same size
        self.order_vertices_for_dependent_splitters(vertices)

        # Set up the progress
        progress = ProgressBar(len(app_graph.vertices),
                               self.__PROGRESS_BAR_VERTICES)

        return machine_graph, resource_tracker, vertices, progress
Example #19
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: A set of placements
        :rtype: Placements
        :raise PacmanPlaceException:
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        all_vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in all_vertices_placed:
                vertices_placed = self._place_vertex(vertex, resource_tracker,
                                                     machine, placements,
                                                     vertices_on_same_chip,
                                                     machine_graph)
                all_vertices_placed.update(vertices_placed)
        return placements
Example #20
    def test_allocate_resources_when_chip_used(self):
        router = Router([])
        sdram = SDRAM()
        empty_chip = Chip(
            0, 0, [], router, sdram, 0, 0, "127.0.0.1",
            virtual=False, tag_ids=[1])
        machine = Machine([empty_chip], 0, 0)
        resource_tracker = ResourceTracker(machine)
        with self.assertRaises(PacmanValueError):
            resource_tracker.allocate_resources(
                ResourceContainer(sdram=SDRAMResource(1024)))
Example #21
def __allocate_tags_for_placement(placement, resource_tracker, tag_collector,
                                  ports_collector, tag_port_tasks):
    """
    :param Placement placement:
    :param ResourceTracker resource_tracker:
    :param Tags tag_collector:
    :param dict(str,set(int)) ports_collector:
    :param list(_Task) tag_port_tasks:
    """
    vertex = placement.vertex
    resources = vertex.resources_required

    # Get the constraint details for the tags
    (board_address, ip_tags, reverse_ip_tags) = \
        ResourceTracker.get_ip_tag_info(resources, vertex.constraints)

    # Allocate the tags, first-come, first-served, using the fixed
    # placement of the vertex, and the required resources
    chips = [(placement.x, placement.y)]
    (_, _, _, returned_ip_tags, returned_reverse_ip_tags) = \
        resource_tracker.allocate_resources(
            resources, chips, placement.p, board_address, ip_tags,
            reverse_ip_tags)

    # Put the allocated IP tag information into the tag object
    if returned_ip_tags is not None:
        for (tag_constraint, (board_address, tag, dest_x, dest_y)) in \
                zip(ip_tags, returned_ip_tags):
            ip_tag = IPTag(
                board_address=board_address,
                destination_x=dest_x,
                destination_y=dest_y,
                tag=tag,
                ip_address=tag_constraint.ip_address,
                port=tag_constraint.port,
                strip_sdp=tag_constraint.strip_sdp,
                traffic_identifier=tag_constraint.traffic_identifier)
            tag_collector.add_ip_tag(ip_tag, vertex)

    if returned_reverse_ip_tags is None:
        return

    # Put the allocated reverse IP tag information into the tag object
    for tag_constraint, (board_address, tag) in zip(reverse_ip_tags,
                                                    returned_reverse_ip_tags):
        if board_address not in ports_collector:
            ports_collector[board_address] = OrderedSet(_BOARD_PORTS)
        if tag_constraint.port is not None:
            reverse_ip_tag = ReverseIPTag(board_address, tag,
                                          tag_constraint.port, placement.x,
                                          placement.y, placement.p,
                                          tag_constraint.sdp_port)
            tag_collector.add_reverse_ip_tag(reverse_ip_tag, vertex)

            ports_collector[board_address].discard(tag_constraint.port)
        else:
            tag_port_tasks.append(
                _Task(tag_constraint, board_address, tag, vertex, placement))
Example #22
    def __call__(self,
                 app_graph,
                 machine,
                 plan_n_time_steps,
                 pre_allocated_resources=None):
        """
        :param ApplicationGraph app_graph: The application_graph to partition
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param plan_n_time_steps:
            the number of time steps to plan to run for
        :type plan_n_time_steps: int or None
        :param pre_allocated_resources:
            res needed to be preallocated before making new machine vertices
        :type pre_allocated_resources: PreAllocatedResourceContainer or None
        :return:
            A machine_graph of partitioned vertices and partitioned edges,
            and the number of chips needed to satisfy this partitioning.
        :rtype: tuple(MachineGraph, int)
        :raise PacmanPartitionException:
            If something goes wrong with the partitioning
        """

        # check resource tracker can handle constraints
        ResourceTracker.check_constraints(app_graph.vertices)

        # get the setup objects
        (machine_graph, resource_tracker, vertices,
         progress) = (self.__setup_objects(app_graph, machine,
                                           plan_n_time_steps,
                                           pre_allocated_resources))

        self.__set_max_atoms_to_splitters(app_graph)

        # Partition one vertex at a time
        for vertex in progress.over(vertices):
            vertex.splitter.split(resource_tracker, machine_graph)

        # process edges
        self.__process_machine_edges(app_graph, machine_graph,
                                     resource_tracker)

        # return the accepted things
        return machine_graph, resource_tracker.chips_used
Example #23
    def _check_constraints(
            self, vertices, additional_placement_constraints=None):
        """ Ensure that the algorithm conforms to any required constraints.

        :param vertices: The vertices for which to check the constraints
        :type vertices: dict()
        :param additional_placement_constraints:\
            Additional placement constraints supported by the algorithm doing\
            this check
        :type additional_placement_constraints: set of \
            :py:class:`pacman.model.constraints.placer_constraints.AbstractPlacerConstraint`
        """

        placement_constraints = {SameChipAsConstraint}
        if additional_placement_constraints is not None:
            placement_constraints.update(additional_placement_constraints)
        ResourceTracker.check_constraints(
            vertices, additional_placement_constraints=placement_constraints)
Example #24
    def test_allocate_resources_when_chip_used(self):
        router = Router([])
        sdram = SDRAM()
        empty_chip = Chip(
            0, 0, 1, router, sdram, 0, 0, "127.0.0.1",
            virtual=False, tag_ids=[1])
        machine = machine_from_chips([empty_chip])
        resource_tracker = ResourceTracker(machine, plan_n_timesteps=None)
        with self.assertRaises(PacmanValueError):
            resource_tracker.allocate_resources(
                ResourceContainer(sdram=ConstantSDRAM(1024)))
Example #25
    def __call__(self, machine_graph, machine):

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_random_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in vertices_placed:
                vertices_placed.update(
                    self._place_vertex(vertex, resource_tracker, machine,
                                       placements, vertices_on_same_chip))
        return placements
Example #26
    def __call__(self, machine_graph, machine):
        """
        :param machine_graph: The machine_graph to measure
        :type machine_graph:\
            :py:class:`pacman.model.graph.machine.MachineGraph`
        :return: The size of the graph in number of chips
        :rtype: int
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        ordered_vertices = sort_vertices_by_known_constraints(
            machine_graph.vertices)

        # Iterate over vertices and allocate
        progress = ProgressBar(machine_graph.n_vertices, "Measuring the graph")
        resource_tracker = ResourceTracker(machine)
        for vertex in progress.over(ordered_vertices):
            resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints)
        return len(resource_tracker.keys)
Example #27
    def __call__(self, graph, machine, plan_n_timesteps):
        """
        :param graph: The application_graph to partition
        :type graph:\
            :py:class:`pacman.model.graphs.application.ApplicationGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :return: A machine graph
        :rtype:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :raise pacman.exceptions.PacmanPartitionException:\
            If something goes wrong with the partitioning
        """
        ResourceTracker.check_constraints(graph.vertices)
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            supported_constraints=[
                MaxVertexAtomsConstraint, FixedVertexAtomsConstraint],
            abstract_constraint_type=AbstractPartitionerConstraint)

        # start progress bar
        progress = ProgressBar(graph.n_vertices, "Partitioning graph vertices")
        machine_graph = MachineGraph("Machine graph for " + graph.label)
        graph_mapper = GraphMapper()
        resource_tracker = ResourceTracker(machine, plan_n_timesteps)

        # Partition one vertex at a time
        for vertex in progress.over(graph.vertices):
            self._partition_one_application_vertex(
                vertex, resource_tracker, machine_graph, graph_mapper,
                plan_n_timesteps)

        generate_machine_edges(machine_graph, graph_mapper, graph)

        return machine_graph, graph_mapper, resource_tracker.chips_used
Example #28
    def _run(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: A set of placements
        :rtype: ~pacman.model.placements.Placements
        :raise PacmanPlaceException:
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(vertex.constraints,
                                          AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements,
                               vertices_on_same_chip, machine_graph)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(max_connected_vertex,
                                               machine_graph, unconstrained,
                                               machine, placements,
                                               resource_tracker, progress,
                                               vertices_on_same_chip)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
Example #29
    def _allocate_tags_for_placement(self, placement, resource_tracker,
                                     tag_collector, ports_collector,
                                     tag_port_tasks):
        vertex = placement.vertex
        resources = vertex.resources_required

        # Get the constraint details for the tags
        (board_address, ip_tags, reverse_ip_tags) = \
            ResourceTracker.get_ip_tag_info(resources, vertex.constraints)

        # Allocate the tags, first-come, first-served, using the fixed
        # placement of the vertex, and the required resources
        chips = [(placement.x, placement.y)]
        (_, _, _, returned_ip_tags, returned_reverse_ip_tags) = \
            resource_tracker.allocate_resources(
                resources, chips, placement.p, board_address, ip_tags,
                reverse_ip_tags)

        # Put the allocated IP tag information into the tag object
        if returned_ip_tags is not None:
            for (tag_constraint, (board_address, tag, dest_x, dest_y)) in \
                    zip(ip_tags, returned_ip_tags):
                ip_tag = IPTag(
                    board_address=board_address, destination_x=dest_x,
                    destination_y=dest_y, tag=tag,
                    ip_address=tag_constraint.ip_address,
                    port=tag_constraint.port,
                    strip_sdp=tag_constraint.strip_sdp,
                    traffic_identifier=tag_constraint.traffic_identifier)
                tag_collector.add_ip_tag(ip_tag, vertex)

        if returned_reverse_ip_tags is None:
            return

        # Put the allocated reverse IP tag information into the tag object
        for tag_constraint, (board_address, tag) in zip(
                reverse_ip_tags, returned_reverse_ip_tags):
            if board_address not in ports_collector:
                ports_collector[board_address] = OrderedSet(_BOARD_PORTS)
            if tag_constraint.port is not None:
                reverse_ip_tag = ReverseIPTag(
                    board_address, tag, tag_constraint.port,
                    placement.x, placement.y, placement.p,
                    tag_constraint.sdp_port)
                tag_collector.add_reverse_ip_tag(reverse_ip_tag, vertex)

                ports_collector[board_address].discard(tag_constraint.port)
            else:
                tag_port_tasks.append(
                    (tag_constraint, board_address, tag, vertex, placement))
Example #30
    def __call__(self, graph, machine):
        """
        :param graph: The application_graph to partition
        :type graph:\
            :py:class:`pacman.model.graphs.application.ApplicationGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :return: A machine graph
        :rtype:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :raise pacman.exceptions.PacmanPartitionException:\
            If something goes wrong with the partitioning
        """
        ResourceTracker.check_constraints(graph.vertices)
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            supported_constraints=[
                MaxVertexAtomsConstraint, FixedVertexAtomsConstraint
            ],
            abstract_constraint_type=AbstractPartitionerConstraint)

        # start progress bar
        progress = ProgressBar(graph.n_vertices, "Partitioning graph vertices")
        machine_graph = MachineGraph("Machine graph for " + graph.label)
        graph_mapper = GraphMapper()
        resource_tracker = ResourceTracker(machine)

        # Partition one vertex at a time
        for vertex in progress.over(graph.vertices):
            self._partition_one_application_vertex(vertex, resource_tracker,
                                                   machine_graph, graph_mapper)

        utils.generate_machine_edges(machine_graph, graph_mapper, graph)

        return machine_graph, graph_mapper, resource_tracker.chips_used
Example #31
def basic_tag_allocator(machine, plan_n_timesteps, placements):
    """
    Basic tag allocator that goes through the boards available and applies\
        the IP tags and reverse IP tags as needed.

    :param ~spinn_machine.Machine machine:
        The machine with respect to which to partition the application
        graph
    :param int plan_n_timesteps: number of timesteps to plan for
    :param Placements placements:
    :return: the tag allocation holder, containing the allocated IP tags
        and reverse IP tags
    :rtype: Tags
    """
    resource_tracker = ResourceTracker(machine, plan_n_timesteps)

    # Keep track of ports allocated to reverse IP tags and tags that still
    # need a port to be allocated
    ports_to_allocate = dict()
    tags_to_allocate_ports = list()

    # Check that the algorithm can handle the constraints
    progress = ProgressBar(placements.n_placements, "Discovering tags")
    placements_with_tags = list()
    for placement in progress.over(placements.placements):
        __gather_placements_with_tags(placement, placements_with_tags)

    # Go through and allocate the IP tags and constrained reverse IP tags
    tags = Tags()
    progress = ProgressBar(placements_with_tags, "Allocating tags")
    for placement in progress.over(placements_with_tags):
        __allocate_tags_for_placement(placement, resource_tracker, tags,
                                      ports_to_allocate,
                                      tags_to_allocate_ports)

    # Finally allocate ports to the unconstrained reverse IP tags
    __allocate_ports_for_reverse_ip_tags(tags_to_allocate_ports,
                                         ports_to_allocate, tags)

    return tags
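
A minimal sketch of driving this allocator is shown below. The import paths are assumptions, and the empty Placements is only a placeholder; real placements would come from one of the placers above. Following the ``return tags`` above, the Tags holder carries both tag lists.

    from spinn_machine import virtual_machine
    from pacman.model.placements import Placements

    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    placements = Placements()   # normally filled with vertices that need IP tags

    tags = basic_tag_allocator(machine, plan_n_timesteps=1000,
                               placements=placements)
    print(list(tags.ip_tags), list(tags.reverse_ip_tags))
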
Example #32
    def __call__(self, machine_graph, machine):

        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(vertex.constraints,
                                          AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(max_connected_vertex,
                                               machine_graph, unconstrained,
                                               machine, placements,
                                               resource_tracker, progress)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
Example #33
    def _do_allocation(self, vertices, machine, same_chip_vertex_groups,
                       machine_graph):
        placements = Placements()

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # iterate over vertices
        for vertex_list in vertices:
            # if too many one to ones to fit on a chip, allocate individually
            if len(vertex_list) > machine.maximum_user_cores_on_chip:
                for vertex in progress.over(vertex_list, False):
                    self._allocate_individual(vertex, placements,
                                              resource_tracker,
                                              same_chip_vertex_groups,
                                              all_vertices_placed)
                continue
            allocations = self._get_allocations(resource_tracker, vertex_list)
            if allocations is not None:
                # allocate cores to vertices
                for vertex, (x, y, p, _,
                             _) in progress.over(zip(vertex_list, allocations),
                                                 False):
                    placements.add_placement(Placement(vertex, x, y, p))
            else:
                # Something went wrong, try to allocate each individually
                for vertex in progress.over(vertex_list, False):
                    self._allocate_individual(vertex, placements,
                                              resource_tracker,
                                              same_chip_vertex_groups,
                                              all_vertices_placed)
        progress.end()
        return placements
Example #34
    def __call__(self, machine_graph, machine):
        """
        :param machine_graph: The machine_graph to measure
        :type machine_graph:\
            :py:class:`pacman.model.graph.machine.MachineGraph`
        :return: The size of the graph in number of chips
        :rtype: int
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        ordered_vertices = sort_vertices_by_known_constraints(
            machine_graph.vertices)

        # Iterate over vertices and allocate
        progress = ProgressBar(machine_graph.n_vertices, "Measuring the graph")
        resource_tracker = ResourceTracker(machine)
        for vertex in progress.over(ordered_vertices):
            resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints)
        return len(resource_tracker.keys)
Example #35
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """
        :param ~.MachineGraph machine_graph:
        :param ~.Machine machine:
        :param int plan_n_timesteps:
        :rtype: int
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        ordered_vertices = sort_vertices_by_known_constraints(
            machine_graph.vertices)

        # Iterate over vertices and allocate
        progress = ProgressBar(machine_graph.n_vertices, "Measuring the graph")

        resource_tracker = ResourceTracker(machine, plan_n_timesteps)
        for vertex in progress.over(ordered_vertices):
            resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints)
        return len(resource_tracker.keys)
Example #36
    def _do_allocation(
            self, one_to_one_groups, same_chip_vertex_groups,
            machine, plan_n_timesteps, machine_graph, progress):
        """
        :param list(set(MachineVertex)) one_to_one_groups:
            Groups of vertices that would ideally be placed on the same chip
        :param same_chip_vertex_groups:
            Mapping of Vertex to the Vertex that must be on the same Chip
        :type same_chip_vertex_groups:
            dict(MachineVertex, collection(MachineVertex))
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_utilities.progress_bar.ProgressBar progress:
        :rtype: Placements
        """

        placements = Placements()

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # RadialPlacementFromChipConstraint won't work here
        for vertex in machine_graph.vertices:
            for constraint in vertex.constraints:
                if isinstance(constraint, RadialPlacementFromChipConstraint):
                    raise PacmanPlaceException(
                        "A RadialPlacementFromChipConstraint will not work "
                        "with the OneToOnePlacer algorithm; use the "
                        "RadialPlacer algorithm instead")

        # Find and place vertices with hard constraints
        for vertex in machine_graph.vertices:
            if isinstance(vertex, AbstractVirtual):
                virtual_p = 0
                while placements.is_processor_occupied(
                        vertex.virtual_chip_x, vertex.virtual_chip_y,
                        virtual_p):
                    virtual_p += 1
                placements.add_placement(Placement(
                    vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                    virtual_p))
                all_vertices_placed.add(vertex)
            elif locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint):
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed, progress,
                    machine_graph)

        for grouped_vertices in one_to_one_groups:
            # Get unallocated vertices and placements of allocated vertices
            unallocated = list()
            chips = list()
            for vert in grouped_vertices:
                if vert in all_vertices_placed:
                    placement = placements.get_placement_of_vertex(vert)
                    chips.append((placement.x, placement.y))
                else:
                    unallocated.append(vert)
            if not chips:
                chips = None

            if 0 < len(unallocated) <=\
                    resource_tracker.get_maximum_cores_available_on_a_chip():
                # Try to allocate all vertices to the same chip
                self._allocate_one_to_one_group(
                    resource_tracker, unallocated, progress, placements, chips,
                    all_vertices_placed, machine_graph)
            # if too big or failed go on to other groups first

        # check all have been allocated if not do so now.
        for vertex in machine_graph.vertices:
            if vertex not in all_vertices_placed:
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed,
                    progress, machine_graph)

        progress.end()
        return placements
Example #37
    def test_n_cores_available(self):
        machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
        preallocated_resources = PreAllocatedResourceContainer()
        preallocated_resources.add_cores_all(2)
        preallocated_resources.add_cores_ethernet(3)
        tracker = ResourceTracker(
            machine,
            plan_n_timesteps=None,
            preallocated_resources=preallocated_resources)

        # Should be 12 cores = 18 - 1 monitor - 3 ethernet - 2 all cores
        self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 12)

        # Should be 15 cores = 18 - 1 monitor - 2 all cores
        self.assertEqual(tracker._get_core_tracker(0, 1).n_cores_available, 15)

        # Should be True since the core is not pre allocated
        self.assertTrue(tracker._get_core_tracker(0, 0).is_core_available(2))

        # Should be False since the core is monitor
        self.assertFalse(tracker._get_core_tracker(0, 0).is_core_available(0))

        # Allocate a core
        tracker._get_core_tracker(0, 0).allocate(2)

        # Should be 11 cores as one now allocated
        self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 11)

        with self.assertRaises(PacmanInvalidParameterException):
            tracker._get_core_tracker(2, 2)
Example #38
    def __call__(
            self, graph, machine, plan_n_timesteps,
            preallocated_resources=None):
        """
        :param graph: The application_graph to partition
        :type graph:\
            :py:class:`pacman.model.graph.application.ApplicationGraph`
        :param machine: The machine with respect to which to partition the\
            application_graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :return: \
            A machine_graph of partitioned vertices and partitioned edges
        :rtype:\
            :py:class:`pacman.model.graph.machine.MachineGraph`
        :raise pacman.exceptions.PacmanPartitionException: \
            If something goes wrong with the partitioning
        """
        ResourceTracker.check_constraints(graph.vertices)
        utils.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            abstract_constraint_type=AbstractPartitionerConstraint,
            supported_constraints=[MaxVertexAtomsConstraint,
                                   SameAtomsAsVertexConstraint,
                                   FixedVertexAtomsConstraint])

        # Load the vertices and create the machine_graph to fill
        machine_graph = MachineGraph(
            label="partitioned graph for {}".format(graph.label))
        graph_mapper = GraphMapper()

        # sort out vertices by placement constraints
        vertices = sort_vertices_by_known_constraints(graph.vertices)

        # Set up the progress
        n_atoms = 0
        for vertex in vertices:
            n_atoms += vertex.n_atoms
        progress = ProgressBar(n_atoms, "Partitioning graph vertices")

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps,
            preallocated_resources=preallocated_resources)

        # Group vertices that are supposed to be the same size
        vertex_groups = get_same_size_vertex_groups(vertices)

        # Partition one vertex at a time
        for vertex in vertices:

            # check that the vertex hasn't already been partitioned
            machine_vertices = graph_mapper.get_machine_vertices(vertex)

            # if not, partition
            if machine_vertices is None:
                self._partition_vertex(
                    vertex, plan_n_timesteps, machine_graph, graph_mapper,
                    resource_tracker, progress, vertex_groups)
        progress.end()

        generate_machine_edges(machine_graph, graph_mapper, graph)

        return machine_graph, graph_mapper, resource_tracker.chips_used
Example #39
    def _gather_placements_with_tags(self, placement, collector):
        if (placement.vertex.resources_required.iptags or
                placement.vertex.resources_required.reverse_iptags):
            ResourceTracker.check_constraints([placement.vertex])
            collector.append(placement)
Example #40
    def __call__(self, machine_graph, machine, n_keys_map, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: the machine graph
        :param ~spinn_machine.Machine machine: the SpiNNaker machine
        :param AbstractMachinePartitionNKeysMap n_keys_map:
            the n keys from partition map
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: placements.
        :rtype: Placements
        """
        # create progress bar
        progress_bar = ProgressBar(
            (machine_graph.n_vertices * self.ITERATIONS) + self.STEPS,
            "Placing graph vertices via spreading over an entire machine")

        # check that the algorithm can handle the constraints
        self._check_constraints(
            machine_graph.vertices,
            additional_placement_constraints={SameChipAsConstraint})
        progress_bar.update()

        # get same chip groups
        same_chip_vertex_groups = get_same_chip_vertex_groups(machine_graph)
        progress_bar.update()
        # get chip and core placed verts
        hard_chip_constraints = self._locate_hard_placement_verts(
            machine_graph)
        progress_bar.update()
        # get one to one groups
        one_to_one_groups = create_vertices_groups(
            machine_graph.vertices,
            functools.partial(self._find_one_to_one_vertices,
                              graph=machine_graph))
        progress_bar.update()

        # sort chips so that they radiate out from a given point, and set up
        # the other data structures
        chips_in_order = self._determine_chip_list(machine)
        resource_tracker = ResourceTracker(machine,
                                           plan_n_timesteps,
                                           chips=chips_in_order)
        placements = Placements()
        placed_vertices = set()
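        # running estimate of the packet cost already placed on each (x, y)
        # chip, used when spreading later vertices over the machine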
        cost_per_chip = defaultdict(int)
        progress_bar.update()

        # allocate vertices with hard chip and core constraints first
        for hard_vertex in hard_chip_constraints:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                hard_vertex.resources_required, hard_vertex.constraints)
            placements.add_placement(Placement(hard_vertex, x, y, p))
            placed_vertices.add(hard_vertex)
            cost_per_chip[x, y] += self._get_cost(hard_vertex, machine_graph,
                                                  n_keys_map)

        # place groups of vertices that must share a chip onto the same chip
        self._place_same_chip_verts(same_chip_vertex_groups, chips_in_order,
                                    placements, progress_bar, resource_tracker,
                                    placed_vertices, cost_per_chip,
                                    machine_graph, n_keys_map)

        # place one group per chip, on the same chip as any already placed
        # vertices if possible; otherwise radially outwards from them
        self._place_one_to_one_verts(one_to_one_groups, chips_in_order,
                                     placements, progress_bar,
                                     resource_tracker, placed_vertices,
                                     cost_per_chip, machine_graph, n_keys_map,
                                     machine)

        # place the remaining vertices, which have no restrictive placement
        # constraints, spreading them over the chips so that they have minimal
        # impact on the overall incoming packet cost per router
        self._place_left_over_verts(machine_graph, chips_in_order, placements,
                                    progress_bar, resource_tracker,
                                    placed_vertices, cost_per_chip, n_keys_map)
        progress_bar.end()

        # return the built placements
        return placements

    @staticmethod
    def _find_one_to_one_vertices(vertex, graph):
        """ Find vertices which have one to one connections with the given\
            vertex, and where their constraints don't force them onto\
            different chips.

        :param vertex: the vertex to use as a basis for one to one connections
        :param graph: the graph in which to look for other one-to-one vertices
        :return: set of one to one vertices
        """
        x, y, _ = ResourceTracker.get_chip_and_core(vertex.constraints)
        found_vertices = [vertex]
        vertices_seen = {vertex}

        # look for one to ones leaving this vertex
        outgoing = graph.get_edges_starting_at_vertex(vertex)
        vertices_to_try = [edge.post_vertex for edge in outgoing]
        while vertices_to_try:
            next_vertex = vertices_to_try.pop()
            if next_vertex not in vertices_seen:
                vertices_seen.add(next_vertex)
                post_x, post_y, _ = ResourceTracker.get_chip_and_core(
                    next_vertex.constraints)
                edges = graph.get_edges_ending_at_vertex(next_vertex)
                if is_single(edges) and not _conflict(x, y, post_x, post_y):
                    found_vertices.append(next_vertex)
                    if post_x is not None:
                        x = post_x
                    if post_y is not None:
                        y = post_y
                    outgoing = graph.get_edges_starting_at_vertex(next_vertex)
                    vertices_to_try.extend([
                        edge.post_vertex for edge in outgoing
                        if edge.post_vertex not in vertices_seen
                    ])

        # look for one to ones entering this vertex
        incoming = graph.get_edges_ending_at_vertex(vertex)
        vertices_to_try = [
            edge.pre_vertex for edge in incoming
            if edge.pre_vertex not in vertices_seen
        ]
        while vertices_to_try:
            next_vertex = vertices_to_try.pop()
            if next_vertex not in vertices_seen:
                vertices_seen.add(next_vertex)
                pre_x, pre_y, _ = ResourceTracker.get_chip_and_core(
                    next_vertex.constraints)
                edges = graph.get_edges_starting_at_vertex(next_vertex)
                if is_single(edges) and not _conflict(x, y, pre_x, pre_y):
                    found_vertices.append(next_vertex)
                    if pre_x is not None:
                        x = pre_x
                    if pre_y is not None:
                        y = pre_y
                    incoming = graph.get_edges_ending_at_vertex(next_vertex)
                    vertices_to_try.extend([
                        edge.pre_vertex for edge in incoming
                        if edge.pre_vertex not in vertices_seen
                    ])

        return found_vertices
    def _do_allocation(
            self, one_to_one_groups, same_chip_vertex_groups,
            machine, plan_n_timesteps, machine_graph, progress):
        """

        :param one_to_one_groups:
            Groups of vertexes that would be nice on same chip
        :type one_to_one_groups:
            list(set(vertex))
        :param same_chip_vertex_groups:
            Mapping of Vertex to the Vertex that must be on the same Chip
        :type same_chip_vertex_groups:
            dict(vertex, collection(vertex))
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param progress:
        :return:
        """

        placements = Placements()

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # RadialPlacementFromChipConstraint won't work here
        for vertex in machine_graph.vertices:
            for constraint in vertex.constraints:
                if isinstance(constraint, RadialPlacementFromChipConstraint):
                    raise PacmanPlaceException(
                        "A RadialPlacementFromChipConstraint will not work "
                        "with the OneToOnePlacer algorithm; use the "
                        "RadialPlacer algorithm instead")

        unconstrained = list()
        # Find and place vertices with hard constraints
        for vertex in machine_graph.vertices:
            if isinstance(vertex, AbstractVirtualVertex):
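                # Virtual vertices are pinned to their virtual chip; just take
                # the next free processor index on that chip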
                virtual_p = 0
                while placements.is_processor_occupied(
                        vertex.virtual_chip_x, vertex.virtual_chip_y,
                        virtual_p):
                    virtual_p += 1
                placements.add_placement(Placement(
                    vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                    virtual_p))
                all_vertices_placed.add(vertex)
            elif locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint):
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups,
                    all_vertices_placed, progress)
            else:
                unconstrained.append(vertex)

        for grouped_vertices in one_to_one_groups:
            # Get unallocated vertices and placements of allocated vertices
            unallocated = list()
            chips = list()
            for vert in grouped_vertices:
                if vert in all_vertices_placed:
                    placement = placements.get_placement_of_vertex(vert)
                    chips.append((placement.x, placement.y))
                else:
                    unallocated.append(vert)

            if 0 < len(unallocated) <=\
                    resource_tracker.get_maximum_cores_available_on_a_chip():
                # Try to allocate all vertices to the same chip
                self._allocate_one_to_one_group(
                    resource_tracker, unallocated, progress, placements, chips,
                    all_vertices_placed)
            # if the group is too big, or the allocation failed, move on to
            # the other groups first

        # check that every vertex has been allocated; if not, place it now
        for vertex in machine_graph.vertices:
            if vertex not in all_vertices_placed:
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed,
                    progress)

        progress.end()
        return placements
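

# A minimal sketch (an assumption, since the `_conflict` helper referenced in
# `_find_one_to_one_vertices` above is not shown here): two chip constraints
# conflict only when both coordinates are known and differ.
def _conflict(x, y, post_x, post_y):
    if x is not None and post_x is not None and x != post_x:
        return True
    if y is not None and post_y is not None and y != post_y:
        return True
    return False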