Example 1
    def __call__(self, machine, placements):
        """ See :py:meth:`AbstractTagAllocatorAlgorithm.allocate_tags`
        """

        resource_tracker = ResourceTracker(machine)

        # Keep track of ports allocated to reverse IP tags and tags that still
        # need a port to be allocated
        ports_to_allocate = dict()
        tags_to_allocate_ports = list()

        # Check that the algorithm can handle the constraints
        progress = ProgressBar(placements.n_placements, "Discovering tags")
        placements_with_tags = list()
        for placement in progress.over(placements.placements):
            self._gather_placements_with_tags(placement, placements_with_tags)

        # Go through and allocate the IP tags and constrained reverse IP tags
        tags = Tags()
        progress = ProgressBar(placements_with_tags, "Allocating tags")
        for placement in progress.over(placements_with_tags):
            self._allocate_tags_for_placement(placement, resource_tracker,
                                              tags, ports_to_allocate,
                                              tags_to_allocate_ports)

        # Finally allocate ports to the unconstrained reverse IP tags
        self._allocate_ports_for_reverse_ip_tags(tags_to_allocate_ports,
                                                 ports_to_allocate, tags)

        return list(tags.ip_tags), list(tags.reverse_ip_tags), tags
Example 2
    def test_deallocation_of_resources(self):
        machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
        chip_sdram = machine.get_chip_at(1, 1).sdram.size
        res_sdram = 12345

        tracker = ResourceTracker(machine,
                                  plan_n_timesteps=None,
                                  preallocated_resources=None)

        sdram_res = ConstantSDRAM(res_sdram)
        resources = ResourceContainer(sdram=sdram_res)
        chip_0 = machine.get_chip_at(0, 0)

        # verify core tracker is empty
        if (0, 0) in tracker._core_tracker:
            raise Exception("shouldnt exist")

        tracker._get_core_tracker(1, 1)

        # verify core tracker not empty
        if (1, 1) not in tracker._core_tracker:
            raise Exception("should exist")

        # verify sdram tracker
        # 0, 0 in _sdram_tracker due to the get_core_tracker(0, 0) call
        if tracker._sdram_tracker[1, 1] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[1, 1]))

        # allocate some res
        chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
            tracker.allocate_resources(resources, [(0, 0)])

        # verify chips used is updated
        cores = list(tracker._core_tracker[(0, 0)]._cores)
        self.assertEqual(len(cores), chip_0.n_user_processors - 1)

        # verify sdram used is updated
        sdram = tracker._sdram_tracker[(0, 0)]
        self.assertEqual(sdram, chip_sdram - res_sdram)

        if (0, 0) not in tracker._chips_used:
            raise Exception("should exist")

        # deallocate res
        tracker.unallocate_resources(chip_x, chip_y, processor_id, resources,
                                     ip_tags, reverse_ip_tags)

        # verify chips used is updated
        if tracker._core_tracker[(0, 0)].n_cores_available != \
                chip_0.n_user_processors:
            raise Exception("shouldn't exist or should be right size")

        # if (0, 0) in tracker._chips_used:
        #   raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != chip_sdram:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))
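
The test above exercises the full allocate/unallocate round trip on a ResourceTracker. A minimal sketch of that cycle, using only the calls shown in these examples, might look like the following (the import paths are assumptions and may differ between PACMAN versions):

# Minimal sketch of the allocate/unallocate cycle exercised in the test above.
# The import paths below are assumptions and may vary between versions.
from spinn_machine import virtual_machine
from pacman.model.resources import ConstantSDRAM, ResourceContainer
from pacman.utilities.utility_objs.resource_tracker import ResourceTracker

machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
tracker = ResourceTracker(machine, plan_n_timesteps=None)

# Request a core plus some SDRAM anywhere on chip (0, 0)
resources = ResourceContainer(sdram=ConstantSDRAM(12345))
chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
    tracker.allocate_resources(resources, [(0, 0)])

# ... use the allocated core ...

# Return the core, SDRAM and any tags to the tracker
tracker.unallocate_resources(
    chip_x, chip_y, processor_id, resources, ip_tags, reverse_ip_tags)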
Example 3
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place each vertex in a machine graph on a core in the machine.

        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine: A SpiNNaker machine object.
        :param int plan_n_timesteps: number of timesteps to plan for
        :return placements: Placements of vertices on the machine
        :rtype: Placements
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_random_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in vertices_placed:
                vertices_placed.update(
                    self._place_vertex(vertex, resource_tracker, machine,
                                       placements, vertices_on_same_chip))
        return placements
Example 4
    def __setup_objects(self, app_graph, machine, plan_n_time_steps,
                        pre_allocated_resources):
        """ sets up the machine_graph, resource_tracker, vertices, \
            progress bar.

        :param ApplicationGraph app_graph: app graph
        :param ~spinn_machine.Machine machine: machine
        :param int plan_n_time_steps: the number of time steps to run for.
        :param pre_allocated_resources:
            pre-allocated resources from other systems
        :type pre_allocated_resources: PreAllocatedResourceContainer or None
        :return: (machine graph, res tracker, verts, progress bar)
        :rtype: tuple(MachineGraph, ResourceTracker, list(ApplicationVertex),
            ~.ProgressBar)
        """
        # Load the vertices and create the machine_graph to fill
        machine_graph = MachineGraph(label="partitioned graph for {}".format(
            app_graph.label),
                                     application_graph=app_graph)

        resource_tracker = ResourceTracker(
            machine,
            plan_n_time_steps,
            preallocated_resources=pre_allocated_resources)

        # sort out vertices by placement constraints
        vertices = sort_vertices_by_known_constraints(app_graph.vertices)

        # Group vertices that are supposed to be the same size
        self.order_vertices_for_dependent_splitters(vertices)

        # Set up the progress
        progress = ProgressBar(len(app_graph.vertices),
                               self.__PROGRESS_BAR_VERTICES)

        return machine_graph, resource_tracker, vertices, progress
Example 5
    def test_n_cores_available(self):
        machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
        preallocated_resources = PreAllocatedResourceContainer()
        preallocated_resources.add_cores_all(2)
        preallocated_resources.add_cores_ethernet(3)
        tracker = ResourceTracker(
            machine,
            plan_n_timesteps=None,
            preallocated_resources=preallocated_resources)

        # Should be 12 cores = 18 - 1 monitor - 3 ethernet - 2 all cores
        self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 12)

        # Should be 15 cores = 18 - 1 monitor - 2 all cores
        self.assertEqual(tracker._get_core_tracker(0, 1).n_cores_available, 15)

        # Should be True since the core is not pre allocated
        self.assertTrue(tracker._get_core_tracker(0, 0).is_core_available(2))

        # Should be False since the core is monitor
        self.assertFalse(tracker._get_core_tracker(0, 0).is_core_available(0))

        # Allocate a core
        tracker._get_core_tracker(0, 0).allocate(2)

        # Should be 11 cores as one now allocated
        self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 11)

        with self.assertRaises(PacmanInvalidParameterException):
            tracker._get_core_tracker(2, 2)
Example 6
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine: A SpiNNaker machine object.
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: Placements of vertices on the machine
        :rtype: Placements
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(
            machine_graph.vertices,
            additional_placement_constraints={SameChipAsConstraint})

        # in order to test isomorphism include:
        # placements_copy = Placements()
        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        progress = ProgressBar(
            machine_graph.n_vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_hilbert_chips(machine))

        # get vertices which must be placed on the same chip
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)

        # iterate over vertices and generate placements
        all_vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in all_vertices_placed:
                vertices_placed = self._place_vertex(
                    vertex, resource_tracker, machine,
                    placements, vertices_on_same_chip)
                all_vertices_placed.update(vertices_placed)
        return placements
Example 7
    def test_n_cores_available(self):
        machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
        chip = machine.get_chip_at(0, 0)
        preallocated_resources = PreAllocatedResourceContainer(
            specific_core_resources=[
                SpecificCoreResource(chip=chip, cores=[1])
            ],
            core_resources=[CoreResource(chip=chip, n_cores=2)])
        tracker = ResourceTracker(
            machine,
            plan_n_timesteps=None,
            preallocated_resources=preallocated_resources)

        # Should be 14 cores = 18 - 1 monitor - 1 specific core - 2 other cores
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 14)

        # Should be 0 since the core is already pre allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 1), 0)

        # Should be 1 since the core is not pre allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 2), 1)

        # Should be 0 since the core is monitor
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), 0), 0)

        # Allocate a core
        tracker._allocate_core(chip, (0, 0), 2)

        # Should be 13 cores as one now allocated
        self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 13)
Example 8
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: A set of placements
        :rtype: Placements
        :raise PacmanPlaceException:
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        all_vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in all_vertices_placed:
                vertices_placed = self._place_vertex(vertex, resource_tracker,
                                                     machine, placements,
                                                     vertices_on_same_chip,
                                                     machine_graph)
                all_vertices_placed.update(vertices_placed)
        return placements
Example 9
    def __call__(self, machine_graph, machine):
        """ Place a machine_graph so that each vertex is placed on a core

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(machine)
        for vertex in progress.over(vertices):
            # Create and store a new placement anywhere on the board
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, None)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)
        return placements
Example 10
    def __call__(self,
                 nengo_operator_graph,
                 machine,
                 nengo_random_number_generator,
                 pre_allocated_resources=None):
        machine_graph = MachineGraph(label=constants.MACHINE_GRAPH_LABEL)
        graph_mapper = GraphMapper()

        self._resource_tracker = ResourceTracker(
            machine, preallocated_resources=pre_allocated_resources)

        progress_bar = ProgressBar(
            total_number_of_things_to_do=(
                len(nengo_operator_graph.vertices) +
                len(nengo_operator_graph.outgoing_edge_partitions)),
            string_describing_what_being_progressed="partitioning")

        # convert application vertices into machine vertices
        for operator in progress_bar.over(nengo_operator_graph.vertices,
                                          False):

            # create the machine verts
            operator.create_machine_vertices(self._resource_tracker,
                                             machine_graph, graph_mapper)

        self._handle_edges(nengo_operator_graph, machine_graph, graph_mapper,
                           progress_bar, nengo_random_number_generator)

        return machine_graph, graph_mapper, self._resource_tracker.chips_used
Example 11
    def test_deallocation_of_resources(self):
        machine = VirtualMachine(width=2,
                                 height=2,
                                 n_cpus_per_chip=18,
                                 with_monitors=True,
                                 sdram_per_chip=12346)
        tracker = ResourceTracker(machine, preallocated_resources=None)

        sdram_res = SDRAMResource(12345)
        resources = ResourceContainer(sdram=sdram_res)
        chip_0 = machine.get_chip_at(0, 0)

        # verify core tracker is empty
        if (0, 0) in tracker._core_tracker:
            raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != -12346:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))

        # allocate some res
        chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
            tracker.allocate_resources(resources, [(0, 0)])

        # verify chips used is updated
        cores = list(tracker._core_tracker[(0, 0)])
        self.assertEqual(len(cores), chip_0.n_user_processors - 1)

        # verify sdram used is updated
        sdram = tracker._sdram_tracker[(0, 0)]
        self.assertEqual(sdram, -1)

        if (0, 0) not in tracker._chips_used:
            raise Exception("should exist")

        # deallocate res
        tracker.unallocate_resources(chip_x, chip_y, processor_id, resources,
                                     ip_tags, reverse_ip_tags)

        # verify chips used is updated
        if ((0, 0) in tracker._core_tracker and len(
                tracker._core_tracker[(0, 0)]) != chip_0.n_user_processors):
            raise Exception("shouldn't exist or should be right size")

        if (0, 0) in tracker._chips_used:
            raise Exception("shouldnt exist")

        # verify sdram tracker
        if tracker._sdram_tracker[0, 0] != -12346:
            raise Exception("incorrect sdram of {}".format(
                tracker._sdram_tracker[0, 0]))
Example 12
    def _run(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: A set of placements
        :rtype: ~pacman.model.placements.Placements
        :raise PacmanPlaceException:
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(vertex.constraints,
                                          AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements,
                               vertices_on_same_chip, machine_graph)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(max_connected_vertex,
                                               machine_graph, unconstrained,
                                               machine, placements,
                                               resource_tracker, progress,
                                               vertices_on_same_chip)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
Example 13
    def test_allocate_resources_when_chip_used(self):
        router = Router([])
        sdram = SDRAM()
        empty_chip = Chip(
            0, 0, [], router, sdram, 0, 0, "127.0.0.1",
            virtual=False, tag_ids=[1])
        machine = Machine([empty_chip], 0, 0)
        resource_tracker = ResourceTracker(machine)
        with self.assertRaises(PacmanValueError):
            resource_tracker.allocate_resources(
                ResourceContainer(sdram=SDRAMResource(1024)))
Example 14
    def test_allocate_resources_when_chip_used(self):
        router = Router([])
        sdram = SDRAM()
        empty_chip = Chip(
            0, 0, 1, router, sdram, 0, 0, "127.0.0.1",
            virtual=False, tag_ids=[1])
        machine = machine_from_chips([empty_chip])
        resource_tracker = ResourceTracker(machine, plan_n_timesteps=None)
        with self.assertRaises(PacmanValueError):
            resource_tracker.allocate_resources(
                ResourceContainer(sdram=ConstantSDRAM(1024)))
Example 15
def basic_tag_allocator(machine, plan_n_timesteps, placements):
    """
    Basic tag allocator that goes through the boards available and applies\
        the IP tags and reverse IP tags as needed.

    :param ~spinn_machine.Machine machine:
        The machine with respect to which to partition the application
        graph
    :param int plan_n_timesteps: number of timesteps to plan for
    :param Placements placements:
    :return: list of IP Tags, list of Reverse IP Tags,
        tag allocation holder
    :rtype: tuple(list(~spinn_machine.tags.IPTag),
        list(~spinn_machine.tags.ReverseIPTag), Tags)
    """
    resource_tracker = ResourceTracker(machine, plan_n_timesteps)

    # Keep track of ports allocated to reverse IP tags and tags that still
    # need a port to be allocated
    ports_to_allocate = dict()
    tags_to_allocate_ports = list()

    # Check that the algorithm can handle the constraints
    progress = ProgressBar(placements.n_placements, "Discovering tags")
    placements_with_tags = list()
    for placement in progress.over(placements.placements):
        __gather_placements_with_tags(placement, placements_with_tags)

    # Go through and allocate the IP tags and constrained reverse IP tags
    tags = Tags()
    progress = ProgressBar(placements_with_tags, "Allocating tags")
    for placement in progress.over(placements_with_tags):
        __allocate_tags_for_placement(placement, resource_tracker, tags,
                                      ports_to_allocate,
                                      tags_to_allocate_ports)

    # Finally allocate ports to the unconstrained reverse IP tags
    __allocate_ports_for_reverse_ip_tags(tags_to_allocate_ports,
                                         ports_to_allocate, tags)

    return tags
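
basic_tag_allocator above is the function-style counterpart of the __call__ in Example 1, but it returns only the Tags holder rather than the (ip_tags, reverse_ip_tags, tags) triple. A hedged usage sketch, where machine, plan_n_timesteps and placements are assumed to come from earlier mapping stages:

# Hypothetical driver for basic_tag_allocator; `machine` and `placements`
# are assumed to be produced by earlier mapping stages.
tags = basic_tag_allocator(machine, plan_n_timesteps, placements)

# The Tags holder exposes the allocated tags, as seen in the return
# statement of Example 1.
for ip_tag in tags.ip_tags:
    print(ip_tag)
for reverse_ip_tag in tags.reverse_ip_tags:
    print(reverse_ip_tag)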
Example 16
    def __call__(self, machine_graph, machine):
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        all_vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in all_vertices_placed:
                vertices_placed = self._place_vertex(vertex, resource_tracker,
                                                     machine, placements,
                                                     vertices_on_same_chip)
                all_vertices_placed.update(vertices_placed)
        return placements
Example 17
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """
        :param ~.MachineGraph machine_graph:
        :param ~.Machine machine:
        :param int plan_n_timesteps:
        :rtype: int
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        ordered_vertices = sort_vertices_by_known_constraints(
            machine_graph.vertices)

        # Iterate over vertices and allocate
        progress = ProgressBar(machine_graph.n_vertices, "Measuring the graph")

        resource_tracker = ResourceTracker(machine, plan_n_timesteps)
        for vertex in progress.over(ordered_vertices):
            resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints)
        return len(resource_tracker.keys)
Example 18
    def __call__(self, machine_graph, machine):
        """
        :param machine_graph: The machine_graph to measure
        :type machine_graph:\
            :py:class:`pacman.model.graph.machine.MachineGraph`
        :return: The size of the graph in number of chips
        :rtype: int
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        ordered_vertices = sort_vertices_by_known_constraints(
            machine_graph.vertices)

        # Iterate over vertices and allocate
        progress = ProgressBar(machine_graph.n_vertices, "Measuring the graph")
        resource_tracker = ResourceTracker(machine)
        for vertex in progress.over(ordered_vertices):
            resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints)
        return len(resource_tracker.keys)
Example 19
    def __call__(self, machine_graph, machine):

        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(vertex.constraints,
                                          AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(max_connected_vertex,
                                               machine_graph, unconstrained,
                                               machine, placements,
                                               resource_tracker, progress)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
Example 20
    def _do_allocation(self, vertices, machine, same_chip_vertex_groups,
                       machine_graph):
        placements = Placements()

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # iterate over vertices
        for vertex_list in vertices:
            # if too many one to ones to fit on a chip, allocate individually
            if len(vertex_list) > machine.maximum_user_cores_on_chip:
                for vertex in progress.over(vertex_list, False):
                    self._allocate_individual(vertex, placements,
                                              resource_tracker,
                                              same_chip_vertex_groups,
                                              all_vertices_placed)
                continue
            allocations = self._get_allocations(resource_tracker, vertex_list)
            if allocations is not None:
                # allocate cores to vertices
                for vertex, (x, y, p, _,
                             _) in progress.over(zip(vertex_list, allocations),
                                                 False):
                    placements.add_placement(Placement(vertex, x, y, p))
            else:
                # Something went wrong, try to allocate each individually
                for vertex in progress.over(vertex_list, False):
                    self._allocate_individual(vertex, placements,
                                              resource_tracker,
                                              same_chip_vertex_groups,
                                              all_vertices_placed)
        progress.end()
        return placements
Example 21
    def __call__(self, graph, machine):
        """
        :param graph: The application_graph to partition
        :type graph:\
            :py:class:`pacman.model.graphs.application.ApplicationGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :return: A machine graph
        :rtype:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :raise pacman.exceptions.PacmanPartitionException:\
            If something goes wrong with the partitioning
        """
        ResourceTracker.check_constraints(graph.vertices)
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            supported_constraints=[
                MaxVertexAtomsConstraint, FixedVertexAtomsConstraint
            ],
            abstract_constraint_type=AbstractPartitionerConstraint)

        # start progress bar
        progress = ProgressBar(graph.n_vertices, "Partitioning graph vertices")
        machine_graph = MachineGraph("Machine graph for " + graph.label)
        graph_mapper = GraphMapper()
        resource_tracker = ResourceTracker(machine)

        # Partition one vertex at a time
        for vertex in progress.over(graph.vertices):
            self._partition_one_application_vertex(vertex, resource_tracker,
                                                   machine_graph, graph_mapper)

        utils.generate_machine_edges(machine_graph, graph_mapper, graph)

        return machine_graph, graph_mapper, resource_tracker.chips_used
Example 22
    def __call__(self, graph, machine, preallocated_resources=None):
        """
        :param graph: The application_graph to partition
        :type graph:\
            :py:class:`pacman.model.graph.application.ApplicationGraph`
        :param machine: The machine with respect to which to partition the\
            application_graph
        :type machine: :py:class:`spinn_machine.Machine`
        :return: \
            A machine_graph of partitioned vertices and partitioned edges
        :rtype:\
            :py:class:`pacman.model.graph.machine.MachineGraph`
        :raise pacman.exceptions.PacmanPartitionException: \
            If something goes wrong with the partitioning
        """
        ResourceTracker.check_constraints(graph.vertices)
        utils.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            abstract_constraint_type=AbstractPartitionerConstraint,
            supported_constraints=[MaxVertexAtomsConstraint,
                                   SameAtomsAsVertexConstraint,
                                   FixedVertexAtomsConstraint])

        # Load the vertices and create the machine_graph to fill
        machine_graph = MachineGraph(
            label="partitioned graph for {}".format(graph.label))
        graph_mapper = GraphMapper()

        # sort out vertices by placement constraints
        vertices = placer_utils.sort_vertices_by_known_constraints(
            graph.vertices)

        # Set up the progress
        n_atoms = 0
        for vertex in vertices:
            n_atoms += vertex.n_atoms
        progress = ProgressBar(n_atoms, "Partitioning graph vertices")

        resource_tracker = ResourceTracker(
            machine, preallocated_resources=preallocated_resources)

        # Group vertices that are supposed to be the same size
        vertex_groups = partition_utils.get_same_size_vertex_groups(vertices)

        # Partition one vertex at a time
        for vertex in vertices:

            # check that the vertex hasn't already been partitioned
            machine_vertices = graph_mapper.get_machine_vertices(vertex)

            # if not, partition
            if machine_vertices is None:
                self._partition_vertex(
                    vertex, machine_graph, graph_mapper, resource_tracker,
                    progress, vertex_groups)
        progress.end()

        partition_utils.generate_machine_edges(
            machine_graph, graph_mapper, graph)

        return machine_graph, graph_mapper, resource_tracker.chips_used
Example 23
    def _do_allocation(
            self, one_to_one_groups, same_chip_vertex_groups,
            machine, plan_n_timesteps, machine_graph, progress):
        """
        :param list(set(MachineVertex)) one_to_one_groups:
            Groups of vertexes that would be nice on same chip
        :param same_chip_vertex_groups:
            Mapping of Vertex to the Vertex that must be on the same Chip
        :type same_chip_vertex_groups:
            dict(MachineVertex, collection(MachineVertex))
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_utilities.progress_bar.ProgressBar progress:
        :rtype: Placements
        """

        placements = Placements()

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # RadialPlacementFromChipConstraint won't work here
        for vertex in machine_graph.vertices:
            for constraint in vertex.constraints:
                if isinstance(constraint, RadialPlacementFromChipConstraint):
                    raise PacmanPlaceException(
                        "A RadialPlacementFromChipConstraint will not work "
                        "with the OneToOnePlacer algorithm; use the "
                        "RadialPlacer algorithm instead")

        # Find and place vertices with hard constraints
        for vertex in machine_graph.vertices:
            if isinstance(vertex, AbstractVirtual):
                virtual_p = 0
                while placements.is_processor_occupied(
                        vertex.virtual_chip_x, vertex.virtual_chip_y,
                        virtual_p):
                    virtual_p += 1
                placements.add_placement(Placement(
                    vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                    virtual_p))
                all_vertices_placed.add(vertex)
            elif locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint):
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed, progress,
                    machine_graph)

        for grouped_vertices in one_to_one_groups:
            # Get unallocated vertices and placements of allocated vertices
            unallocated = list()
            chips = list()
            for vert in grouped_vertices:
                if vert in all_vertices_placed:
                    placement = placements.get_placement_of_vertex(vert)
                    chips.append((placement.x, placement.y))
                else:
                    unallocated.append(vert)
            if not chips:
                chips = None

            if 0 < len(unallocated) <=\
                    resource_tracker.get_maximum_cores_available_on_a_chip():
                # Try to allocate all vertices to the same chip
                self._allocate_one_to_one_group(
                    resource_tracker, unallocated, progress, placements, chips,
                    all_vertices_placed, machine_graph)
            # if too big or failed go on to other groups first

        # check all have been allocated if not do so now.
        for vertex in machine_graph.vertices:
            if vertex not in all_vertices_placed:
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed,
                    progress, machine_graph)

        progress.end()
        return placements
Example 24
    def __call__(self, machine_graph, machine, n_keys_map, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: the machine graph
        :param ~spinn_machine.Machine machine: the SpiNNaker machine
        :param AbstractMachinePartitionNKeysMap n_keys_map:
            the n keys from partition map
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: placements.
        :rtype: Placements
        """
        # create progress bar
        progress_bar = ProgressBar(
            (machine_graph.n_vertices * self.ITERATIONS) + self.STEPS,
            "Placing graph vertices via spreading over an entire machine")

        # check that the algorithm can handle the constraints
        self._check_constraints(
            machine_graph.vertices,
            additional_placement_constraints={SameChipAsConstraint})
        progress_bar.update()

        # get same chip groups
        same_chip_vertex_groups = get_same_chip_vertex_groups(machine_graph)
        progress_bar.update()
        # get chip and core placed verts
        hard_chip_constraints = self._locate_hard_placement_verts(
            machine_graph)
        progress_bar.update()
        # get one to one groups
        one_to_one_groups = create_vertices_groups(
            machine_graph.vertices,
            functools.partial(self._find_one_to_one_vertices,
                              graph=machine_graph))
        progress_bar.update()

        # sort chips so that they are radial from a given point and other
        # init data structs
        chips_in_order = self._determine_chip_list(machine)
        resource_tracker = ResourceTracker(machine,
                                           plan_n_timesteps,
                                           chips=chips_in_order)
        placements = Placements()
        placed_vertices = set()
        cost_per_chip = defaultdict(int)
        progress_bar.update()

        # allocate hard ones
        for hard_vertex in hard_chip_constraints:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                hard_vertex.resources_required, hard_vertex.constraints)
            placements.add_placement(Placement(hard_vertex, x, y, p))
            placed_vertices.add(hard_vertex)
            cost_per_chip[x, y] += self._get_cost(hard_vertex, machine_graph,
                                                  n_keys_map)

        # place groups of verts that need the same chip on the same chip,
        self._place_same_chip_verts(same_chip_vertex_groups, chips_in_order,
                                    placements, progress_bar, resource_tracker,
                                    placed_vertices, cost_per_chip,
                                    machine_graph, n_keys_map)

        # place 1 group per chip if possible on same chip as any already
        # placed verts. if not then radially from it.
        self._place_one_to_one_verts(one_to_one_groups, chips_in_order,
                                     placements, progress_bar,
                                     resource_tracker, placed_vertices,
                                     cost_per_chip, machine_graph, n_keys_map,
                                     machine)

        # place vertices which don't have annoying placement constraints.
        # spread them over the chips so that they have minimal impact on the
        # overall incoming packet cost per router.
        self._place_left_over_verts(machine_graph, chips_in_order, placements,
                                    progress_bar, resource_tracker,
                                    placed_vertices, cost_per_chip, n_keys_map)
        progress_bar.end()

        # return the built placements
        return placements
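
For completeness, a driver for a placer such as the one above might look roughly as follows. The class name SpreaderPlacer and the surrounding variables are assumptions for illustration only; the snippet shows just the __call__ body.

# Hypothetical driver; SpreaderPlacer is an assumed name for the class
# whose __call__ is shown above, and the inputs are assumed to come from
# earlier mapping stages.
placer = SpreaderPlacer()
placements = placer(machine_graph, machine, n_keys_map, plan_n_timesteps)

# Placements can be iterated as in Example 1
for placement in placements.placements:
    print(placement.x, placement.y, placement.p)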