Example #1
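These snippets are test and placer methods from the SpiNNaker PACMAN code base, shown in both an older and a newer API flavour. Run standalone they need roughly the imports below; this is a sketch for the older flavour only (the module paths are assumptions, and the newer-flavour snippets additionally pull in virtual_machine, set_config, MulticastEdgePartition and ner_route_traffic_aware):

from collections import defaultdict, deque

from spinn_machine import VirtualMachine
from pacman.model.graphs.machine import (
    MachineEdge, MachineGraph, SimpleMachineVertex)
from pacman.model.placements import Placement, Placements
from pacman.model.resources import IPtagResource, ResourceContainer
from pacman.operations.router_algorithms import BasicDijkstraRouting
from pacman.operations.tag_allocator_algorithms import BasicTagAllocator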
    def test_routing(self):
        graph = MachineGraph("Test")
        machine = VirtualMachine(2, 2)
        placements = Placements()
        vertices = list()

        for x in range(machine.max_chip_x + 1):
            for y in range(machine.max_chip_y + 1):
                chip = machine.get_chip_at(x, y)
                if chip is not None:
                    for processor in chip.processors:
                        if not processor.is_monitor:
                            vertex = SimpleMachineVertex(
                                resources=ResourceContainer())
                            graph.add_vertex(vertex)
                            placements.add_placement(
                                Placement(vertex, x, y,
                                          processor.processor_id))
                            vertices.append(vertex)

        for vertex in vertices:
            for vertex_to in vertices:
                if vertex != vertex_to:
                    graph.add_edge(MachineEdge(vertex, vertex_to), "Test")

        router = BasicDijkstraRouting()
        routing_paths = router(placements, machine, graph)

        for vertex in vertices:
            vertices_reached = set()
            queue = deque()
            seen_entries = set()
            placement = placements.get_placement_of_vertex(vertex)
            partition = graph.get_outgoing_edge_partition_starting_at_vertex(
                vertex, "Test")
            entry = routing_paths.get_entry_on_coords_for_edge(
                partition, placement.x, placement.y)
            self.assertEqual(entry.incoming_processor, placement.p)
            queue.append((placement.x, placement.y))
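            # Follow the routing entries outwards from the source placement.
            # The deque is popped from the right, so the walk is depth-first;
            # order does not matter for this reachability check.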
            while queue:
                x, y = queue.pop()
                entry = routing_paths.get_entry_on_coords_for_edge(
                    partition, x, y)
                self.assertIsNotNone(entry)
                chip = machine.get_chip_at(x, y)
                for p in entry.out_going_processors:
                    self.assertIsNotNone(chip.get_processor_with_id(p))
                    vertex_found = placements.get_vertex_on_processor(x, y, p)
                    vertices_reached.add(vertex_found)
                seen_entries.add((x, y))
                for link_id in entry.out_going_links:
                    link = chip.router.get_link(link_id)
                    self.assertIsNotNone(link)
                    dest_x, dest_y = link.destination_x, link.destination_y
                    if (dest_x, dest_y) not in seen_entries:
                        queue.append((dest_x, dest_y))

            for vertex_to in vertices:
                if vertex != vertex_to:
                    self.assertIn(vertex_to, vertices_reached)
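The route-following walk above is repeated almost verbatim in the later routing tests. A minimal sketch of the same logic factored into a reusable helper (the name _follow_route and its placement in the test class are hypothetical; the entry attribute names follow the older API used above, and the assertions from the test are omitted):

    def _follow_route(self, routing_paths, machine, placements, partition,
                      start_x, start_y):
        """Walk the routing entries from (start_x, start_y) and return the
        set of vertices reached."""
        reached = set()
        seen = set()
        to_visit = deque([(start_x, start_y)])
        while to_visit:
            x, y = to_visit.pop()
            entry = routing_paths.get_entry_on_coords_for_edge(
                partition, x, y)
            chip = machine.get_chip_at(x, y)
            for p in entry.out_going_processors:
                reached.add(placements.get_vertex_on_processor(x, y, p))
            seen.add((x, y))
            for link_id in entry.out_going_links:
                link = chip.router.get_link(link_id)
                if (link.destination_x, link.destination_y) not in seen:
                    to_visit.append(
                        (link.destination_x, link.destination_y))
        return reached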
    def test_ip_tags(self):
        machine = virtual_machine(12, 12)
        eth_chips = machine.ethernet_connected_chips
        vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=None, strip_sdp=True)]),
                label="Vertex {}".format(i))
            for i in range(len(eth_chips))]
        print("Created {} vertices".format(len(vertices)))
        placements = Placements(
            Placement(vertex, chip.x, chip.y, 1)
            for vertex, chip in zip(vertices, eth_chips))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(machine,
                               plan_n_timesteps=None,
                               placements=placements)

        for vertex, chip in zip(vertices, eth_chips):
            iptags = tags.get_ip_tags_for_vertex(vertex)
            self.assertEqual(len(iptags), 1,
                             "Incorrect number of tags assigned")
            self.assertEqual(iptags[0].destination_x, chip.x,
                             "Destination of tag incorrect")
            self.assertEqual(iptags[0].destination_y, chip.y,
                             "Destination of tag incorrect")
            placement = placements.get_placement_of_vertex(vertex)
            print(placement, "has tag", iptags[0])
    def test_ip_tags(self):
        machine = VirtualMachine(12, 12, with_wrap_arounds=True)
        eth_chips = machine.ethernet_connected_chips
        vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=None, strip_sdp=True)]),
                label="Vertex {}".format(i))
            for i in range(len(eth_chips))]
        print("Created {} vertices".format(len(vertices)))
        placements = Placements(
            Placement(vertex, chip.x, chip.y, 1)
            for vertex, chip in zip(vertices, eth_chips))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(
            machine, plan_n_timesteps=None, placements=placements)

        for vertex, chip in zip(vertices, eth_chips):
            iptags = tags.get_ip_tags_for_vertex(vertex)
            self.assertEqual(
                len(iptags), 1, "Incorrect number of tags assigned")
            self.assertEqual(
                iptags[0].destination_x, chip.x,
                "Destination of tag incorrect")
            self.assertEqual(
                iptags[0].destination_y, chip.y,
                "Destination of tag incorrect")
            placement = placements.get_placement_of_vertex(vertex)
            print(placement, "has tag", iptags[0])
    def test_routing(self):
        graph = MachineGraph("Test")
        machine = VirtualMachine(2, 2)
        placements = Placements()
        vertices = list()

        for x in range(machine.max_chip_x + 1):
            for y in range(machine.max_chip_y + 1):
                chip = machine.get_chip_at(x, y)
                if chip is not None:
                    for processor in chip.processors:
                        if not processor.is_monitor:
                            vertex = SimpleMachineVertex(
                                resources=ResourceContainer())
                            graph.add_vertex(vertex)
                            placements.add_placement(Placement(
                                vertex, x, y, processor.processor_id))
                            vertices.append(vertex)

        for vertex in vertices:
            for vertex_to in vertices:
                if vertex != vertex_to:
                    graph.add_edge(MachineEdge(vertex, vertex_to), "Test")

        router = BasicDijkstraRouting()
        routing_paths = router(placements, machine, graph)

        for vertex in vertices:
            vertices_reached = set()
            queue = deque()
            seen_entries = set()
            placement = placements.get_placement_of_vertex(vertex)
            partition = graph.get_outgoing_edge_partition_starting_at_vertex(
                vertex, "Test")
            entry = routing_paths.get_entry_on_coords_for_edge(
                partition, placement.x, placement.y)
            self.assertEqual(entry.incoming_processor, placement.p)
            queue.append((placement.x, placement.y))
            while queue:
                x, y = queue.pop()
                entry = routing_paths.get_entry_on_coords_for_edge(
                    partition, x, y)
                self.assertIsNotNone(entry)
                chip = machine.get_chip_at(x, y)
                for p in entry.processor_ids:
                    self.assertIsNotNone(chip.get_processor_with_id(p))
                    vertex_found = placements.get_vertex_on_processor(x, y, p)
                    vertices_reached.add(vertex_found)
                seen_entries.add((x, y))
                for link_id in entry.link_ids:
                    link = chip.router.get_link(link_id)
                    self.assertIsNotNone(link)
                    dest_x, dest_y = link.destination_x, link.destination_y
                    if (dest_x, dest_y) not in seen_entries:
                        queue.append((dest_x, dest_y))

            for vertex_to in vertices:
                if vertex != vertex_to:
                    self.assertIn(vertex_to, vertices_reached)
    def test_routing(self):
        graph = MachineGraph("Test")
        set_config("Machine", "down_chips", "1,2:5,4:3,3")
        machine = virtual_machine(8, 8)
        placements = Placements()
        vertices = list()

        for chip in machine.chips:
            for processor in chip.processors:
                if not processor.is_monitor:
                    vertex = SimpleMachineVertex(resources=ResourceContainer())
                    graph.add_vertex(vertex)
                    placements.add_placement(
                        Placement(vertex, chip.x, chip.y,
                                  processor.processor_id))
                    vertices.append(vertex)

        for vertex in vertices:
            graph.add_outgoing_edge_partition(
                MulticastEdgePartition(identifier="Test", pre_vertex=vertex))
            for vertex_to in vertices:
                graph.add_edge(MachineEdge(vertex, vertex_to), "Test")

        routing_paths = ner_route_traffic_aware(graph, machine, placements)

        for vertex in vertices:
            vertices_reached = set()
            queue = deque()
            seen_entries = set()
            placement = placements.get_placement_of_vertex(vertex)
            partition = graph.get_outgoing_edge_partition_starting_at_vertex(
                vertex, "Test")
            entry = routing_paths.get_entry_on_coords_for_edge(
                partition, placement.x, placement.y)
            self.assertEqual(entry.incoming_processor, placement.p)
            queue.append((placement.x, placement.y))
            while queue:
                x, y = queue.pop()
                entry = routing_paths.get_entry_on_coords_for_edge(
                    partition, x, y)
                self.assertIsNotNone(entry)
                chip = machine.get_chip_at(x, y)
                for p in entry.processor_ids:
                    self.assertIsNotNone(chip.get_processor_with_id(p))
                    vertex_found = placements.get_vertex_on_processor(x, y, p)
                    vertices_reached.add(vertex_found)
                seen_entries.add((x, y))
                for link_id in entry.link_ids:
                    link = chip.router.get_link(link_id)
                    self.assertIsNotNone(link)
                    dest_x, dest_y = link.destination_x, link.destination_y
                    if (dest_x, dest_y) not in seen_entries:
                        queue.append((dest_x, dest_y))

            for vertex_to in vertices:
                self.assertIn(vertex_to, vertices_reached)
    def test_too_many_ip_tags_for_1_board(self):
        n_extra_vertices = 3
        machine = virtual_machine(12, 12)
        eth_chips = machine.ethernet_connected_chips
        eth_chip = eth_chips[0]
        eth_chip_2 = machine.get_chip_at(eth_chip.x + 1, eth_chip.y + 1)
        eth_procs = [
            proc.processor_id for proc in eth_chip.processors
            if not proc.is_monitor
        ]
        procs = [proc for proc in eth_chip_2.processors if not proc.is_monitor]
        eth2_procs = [proc.processor_id for proc in procs]
        proc = procs[-1]
        eth_vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=tag, strip_sdp=True)]),
                label="Ethernet Vertex {}".format(proc))
            for tag in eth_chip.tag_ids]
        eth2_vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=10000 + tag, strip_sdp=True)]),
                label="Ethernet 2 Vertex {}".format(proc))
            for tag in range(n_extra_vertices)]
        placements = Placements(
            Placement(vertex, eth_chip.x, eth_chip.y, proc)
            for proc, vertex in zip(eth_procs, eth_vertices))
        placements.add_placements(
            Placement(vertex, eth_chip_2.x, eth_chip_2.y, proc)
            for proc, vertex in zip(eth2_procs, eth2_vertices))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(machine,
                               plan_n_timesteps=None,
                               placements=placements)

        tags_by_board = defaultdict(set)
        for vertices in (eth_vertices, eth2_vertices):
            for vertex in vertices:
                iptags = tags.get_ip_tags_for_vertex(vertex)
                self.assertEqual(len(iptags), 1,
                                 "Incorrect number of tags assigned")
                placement = placements.get_placement_of_vertex(vertex)
                print(placement, "has tag", iptags[0])
                self.assertFalse(
                    iptags[0].tag in tags_by_board[iptags[0].board_address],
                    "Tag used more than once")
                tags_by_board[iptags[0].board_address].add(iptags[0].tag)

        self.assertEqual(len(tags_by_board[eth_chip.ip_address]),
                         len(eth_chip.tag_ids),
                         "Wrong number of tags assigned to first Ethernet")
    def test_too_many_ip_tags_for_1_board(self):
        n_extra_vertices = 3
        machine = VirtualMachine(12, 12, with_wrap_arounds=True)
        eth_chips = machine.ethernet_connected_chips
        eth_chip = eth_chips[0]
        eth_chip_2 = machine.get_chip_at(eth_chip.x + 1, eth_chip.y + 1)
        eth_procs = [
            proc.processor_id for proc in eth_chip.processors
            if not proc.is_monitor]
        procs = [proc for proc in eth_chip_2.processors if not proc.is_monitor]
        eth2_procs = [proc.processor_id for proc in procs]
        proc = procs[-1]
        eth_vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=tag, strip_sdp=True)]),
                label="Ethernet Vertex {}".format(proc))
            for tag in eth_chip.tag_ids]
        eth2_vertices = [
            SimpleMachineVertex(
                ResourceContainer(iptags=[IPtagResource(
                    "127.0.0.1", port=10000 + tag, strip_sdp=True)]),
                label="Ethernet 2 Vertex {}".format(proc))
            for tag in range(n_extra_vertices)]
        placements = Placements(
            Placement(vertex, eth_chip.x, eth_chip.y, proc)
            for proc, vertex in zip(eth_procs, eth_vertices))
        placements.add_placements(
            Placement(vertex, eth_chip_2.x, eth_chip_2.y, proc)
            for proc, vertex in zip(eth2_procs, eth2_vertices))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(
            machine, plan_n_timesteps=None, placements=placements)

        tags_by_board = defaultdict(set)
        for vertices in (eth_vertices, eth2_vertices):
            for vertex in vertices:
                iptags = tags.get_ip_tags_for_vertex(vertex)
                self.assertEqual(
                    len(iptags), 1, "Incorrect number of tags assigned")
                placement = placements.get_placement_of_vertex(vertex)
                print(placement, "has tag", iptags[0])
                self.assertFalse(
                    iptags[0].tag in tags_by_board[iptags[0].board_address],
                    "Tag used more than once")
                tags_by_board[iptags[0].board_address].add(iptags[0].tag)

        self.assertEqual(
            len(tags_by_board[eth_chip.ip_address]), len(eth_chip.tag_ids),
            "Wrong number of tags assigned to first Ethernet")
Example #8
    def test_get_placement_of_vertex(self):
        """
        checks the placements get placement method
        """
        subv = list()
        for i in range(5):
            subv.append(SimpleMachineVertex(None, ""))

        pl = list()
        for i in range(4):
            pl.append(Placement(subv[i], 0, 0, i))

        pls = Placements(pl)
        for i in range(4):
            self.assertEqual(pls.get_placement_of_vertex(subv[i]), pl[i])
    def test_get_vertex_on_processor(self):
        """
        checks that from a placements object, you can get to the correct
        vertex using the get_vertex_on_processor() method
        """
        subv = list()
        for i in range(5):
            subv.append(SimpleMachineVertex(None, ""))

        pl = list()
        for i in range(4):
            pl.append(Placement(subv[i], 0, 0, i))

        pls = Placements(pl)
        for i in range(4):
            self.assertEqual(pls.get_vertex_on_processor(0, 0, i), subv[i])

        self.assertEqual(pls.get_placement_of_vertex(subv[0]), pl[0])
    def _do_allocation(
            self, one_to_one_groups, same_chip_vertex_groups,
            machine, plan_n_timesteps, machine_graph, progress):
        """
        :param list(set(MachineVertex)) one_to_one_groups:
            Groups of vertexes that would be nice on same chip
        :param same_chip_vertex_groups:
            Mapping of Vertex to the Vertex that must be on the same Chip
        :type same_chip_vertex_groups:
            dict(MachineVertex, collection(MachineVertex))
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_utilities.progress_bar.ProgressBar progress:
        :rtype: Placements
        """

        placements = Placements()

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # RadialPlacementFromChipConstraint won't work here
        for vertex in machine_graph.vertices:
            for constraint in vertex.constraints:
                if isinstance(constraint, RadialPlacementFromChipConstraint):
                    raise PacmanPlaceException(
                        "A RadialPlacementFromChipConstraint will not work "
                        "with the OneToOnePlacer algorithm; use the "
                        "RadialPlacer algorithm instead")

        # Find and place vertices with hard constraints
        for vertex in machine_graph.vertices:
            if isinstance(vertex, AbstractVirtual):
                virtual_p = 0
                while placements.is_processor_occupied(
                        vertex.virtual_chip_x, vertex.virtual_chip_y,
                        virtual_p):
                    virtual_p += 1
                placements.add_placement(Placement(
                    vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                    virtual_p))
                all_vertices_placed.add(vertex)
            elif locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint):
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed, progress,
                    machine_graph)

        for grouped_vertices in one_to_one_groups:
            # Get unallocated vertices and placements of allocated vertices
            unallocated = list()
            chips = list()
            for vert in grouped_vertices:
                if vert in all_vertices_placed:
                    placement = placements.get_placement_of_vertex(vert)
                    chips.append((placement.x, placement.y))
                else:
                    unallocated.append(vert)
            if not chips:
                chips = None

            if 0 < len(unallocated) <=\
                    resource_tracker.get_maximum_cores_available_on_a_chip():
                # Try to allocate all vertices to the same chip
                self._allocate_one_to_one_group(
                    resource_tracker, unallocated, progress, placements, chips,
                    all_vertices_placed, machine_graph)
            # if too big or failed go on to other groups first

        # check all have been allocated if not do so now.
        for vertex in machine_graph.vertices:
            if vertex not in all_vertices_placed:
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed,
                    progress, machine_graph)

        progress.end()
        return placements
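For reference, a minimal sketch of the hard-constraint lookup this placer relies on; the import paths and the example constraint values are assumptions based on the PACMAN package layout of this era:

from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint
from pacman.utilities.utility_calls import locate_constraints_of_type

# Hypothetical vertex pinned to chip (0, 0), core 3.
vertex = SimpleMachineVertex(
    resources=ResourceContainer(),
    constraints=[ChipAndCoreConstraint(x=0, y=0, p=3)])
hard = locate_constraints_of_type(vertex.constraints, ChipAndCoreConstraint)
if hard:
    print("vertex is pinned to", hard[0].x, hard[0].y, hard[0].p)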
    def _do_allocation(
            self, one_to_one_groups, same_chip_vertex_groups,
            machine, plan_n_timesteps, machine_graph, progress):
        """

        :param one_to_one_groups:
            Groups of vertexes that would be nice on same chip
        :type one_to_one_groups:
            list(set(vertex))
        :param same_chip_vertex_groups:
            Mapping of Vertex to the Vertex that must be on the same Chip
        :type same_chip_vertex_groups:
            dict(vertex, collection(vertex))
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param progress:
        :return:
        """

        placements = Placements()

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        all_vertices_placed = set()

        # RadialPlacementFromChipConstraint won't work here
        for vertex in machine_graph.vertices:
            for constraint in vertex.constraints:
                if isinstance(constraint, RadialPlacementFromChipConstraint):
                    raise PacmanPlaceException(
                        "A RadialPlacementFromChipConstraint will not work "
                        "with the OneToOnePlacer algorithm; use the "
                        "RadialPlacer algorithm instead")

        unconstrained = list()
        # Find and place vertices with hard constraints
        for vertex in machine_graph.vertices:
            if isinstance(vertex, AbstractVirtualVertex):
                virtual_p = 0
                while placements.is_processor_occupied(
                        vertex.virtual_chip_x, vertex.virtual_chip_y,
                        virtual_p):
                    virtual_p += 1
                placements.add_placement(Placement(
                    vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                    virtual_p))
                all_vertices_placed.add(vertex)
            elif locate_constraints_of_type(
                    vertex.constraints, ChipAndCoreConstraint):
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups,
                    all_vertices_placed, progress)
            else:
                unconstrained.append(vertex)

        for grouped_vertices in one_to_one_groups:
            # Get unallocated vertices and placements of allocated vertices
            unallocated = list()
            chips = list()
            for vert in grouped_vertices:
                if vert in all_vertices_placed:
                    placement = placements.get_placement_of_vertex(vert)
                    chips.append((placement.x, placement.y))
                else:
                    unallocated.append(vert)

            if 0 < len(unallocated) <=\
                    resource_tracker.get_maximum_cores_available_on_a_chip():
                # Try to allocate all vertices to the same chip
                self._allocate_one_to_one_group(
                    resource_tracker, unallocated, progress, placements, chips,
                    all_vertices_placed)
            # if too big or failed go on to other groups first

        # check all have been allocated if not do so now.
        for vertex in machine_graph.vertices:
            if vertex not in all_vertices_placed:
                self._allocate_same_chip_as_group(
                    vertex, placements, resource_tracker,
                    same_chip_vertex_groups, all_vertices_placed,
                    progress)

        progress.end()
        return placements
Example #14
    def _create_fake_network(self, ethernet_connected_chip):
        """ Generate the fake network for each board

        :param ethernet_connected_chip: the ethernet chip to fire from
        :return: fake graph, fake placements, fake machine.
        """

        fake_graph = MachineGraph(label="routing fake_graph")
        fake_placements = Placements()
        destination_to_partition_id_map = dict()

        # build fake setup for the routing
        eth_x = ethernet_connected_chip.x
        eth_y = ethernet_connected_chip.y
        fake_machine = virtual_submachine(self._real_machine,
                                          ethernet_connected_chip)

        # Build a fake graph with vertices for all the monitors
        for chip in self._real_machine.get_chips_by_ethernet(eth_x, eth_y):
            # locate correct chips extra monitor placement
            placement = self._real_placements.get_placement_of_vertex(
                self._monitors[chip.x, chip.y])

            # adjust for wrap-arounds
            fake_x, fake_y = self._real_machine.get_local_xy(chip)

            # add destination vertex
            vertex = RoutingMachineVertex()
            fake_graph.add_vertex(vertex)

            # build fake placement
            fake_placements.add_placement(
                Placement(x=fake_x, y=fake_y, p=placement.p, vertex=vertex))

        # build source vertex, which is for the Gatherer
        vertex_source = RoutingMachineVertex()
        fake_graph.add_vertex(vertex_source)

        for free_processor in range(Machine.MAX_CORES_PER_CHIP):
            if not fake_placements.is_processor_occupied(
                    x=FAKE_ETHERNET_CHIP_X,
                    y=FAKE_ETHERNET_CHIP_Y,
                    p=free_processor):
                fake_placements.add_placement(
                    Placement(x=FAKE_ETHERNET_CHIP_X,
                              y=FAKE_ETHERNET_CHIP_Y,
                              p=free_processor,
                              vertex=vertex_source))
                break

        # add the edges, each in a unique partition, so that each chip can
        # be routed to individually.
        counter = KEY_START_VALUE
        for vertex in fake_graph.vertices:
            if vertex == vertex_source:
                continue
            fake_graph.add_edge(
                MachineEdge(pre_vertex=vertex_source, post_vertex=vertex),
                counter)
            placement = fake_placements.get_placement_of_vertex(vertex)

            # adjust to real chip ids
            real_chip_xy = self._real_machine.get_global_xy(
                placement.x, placement.y, eth_x, eth_y)
            destination_to_partition_id_map[real_chip_xy] = counter
            counter += N_KEYS_PER_PARTITION_ID

        return (fake_graph, fake_placements, fake_machine,
                destination_to_partition_id_map)
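A hedged sketch of how the returned fake network might then be routed; the ner_route call and its argument order are assumptions mirrored from the ner_route_traffic_aware usage earlier on this page:

        # Hypothetical caller: build and route the per-board fake network.
        (fake_graph, fake_placements, fake_machine,
         destination_to_partition_id_map) = self._create_fake_network(
            ethernet_connected_chip)
        fake_routing_tables = ner_route(
            fake_graph, fake_machine, fake_placements)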