Example No. 1
    def _place_vertex(self, vertex, resource_tracker, machine, placements,
                      location):
        """
        :param MachineVertex vertex:
        :param ResourceTracker resource_tracker:
        :param ~spinn_machine.Machine machine:
        :param Placements placements:
        :param dict(MachineVertex,list(MachineVertex)) location:
        :rtype: list(MachineVertex)
        """
        vertices = location[vertex]
        # random x and y values within the bounds of the machine
        chips = self._generate_random_chips(machine)

        if len(vertices) > 1:
            assigned_values = \
                resource_tracker.allocate_constrained_group_resources([
                    (vert.resources_required, vert.constraints)
                    for vert in vertices], chips)
            for (x, y, p, _, _), vert in zip(assigned_values, vertices):
                placement = Placement(vert, x, y, p)
                placements.add_placement(placement)
        else:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, chips)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)

        return vertices
Example No. 2
    def test_call(self):
        """ Test calling the binary gatherer normally
        """

        vertex_1 = _TestVertexWithBinary("test.aplx", ExecutableType.RUNNING)
        vertex_2 = _TestVertexWithBinary("test2.aplx", ExecutableType.RUNNING)
        vertex_3 = _TestVertexWithBinary("test2.aplx", ExecutableType.RUNNING)
        vertex_4 = _TestVertexWithoutBinary()

        graph = MachineGraph("Test")
        graph.add_vertices([vertex_1, vertex_2, vertex_3])

        placements = Placements(placements=[
            Placement(vertex_1, 0, 0, 0),
            Placement(vertex_2, 0, 0, 1),
            Placement(vertex_3, 0, 0, 2),
            Placement(vertex_4, 0, 0, 3)
        ])

        gatherer = GraphBinaryGatherer()
        targets = gatherer.__call__(placements, graph, _TestExecutableFinder())
        gatherer = LocateExecutableStartType()
        start_type = gatherer.__call__(graph, placements)
        self.assertEqual(next(iter(start_type)), ExecutableType.RUNNING)
        self.assertEqual(targets.total_processors, 3)

        test_cores = targets.get_cores_for_binary("test.aplx")
        test_2_cores = targets.get_cores_for_binary("test2.aplx")
        self.assertEqual(len(test_cores), 1)
        self.assertEqual(len(test_2_cores), 2)
        self.assertIn((0, 0, 0), test_cores)
        self.assertIn((0, 0, 1), test_2_cores)
        self.assertIn((0, 0, 2), test_2_cores)
Example No. 3
    def _place_vertex(self, vertex, resource_tracker, machine, placements,
                      vertices_on_same_chip):
        vertices = vertices_on_same_chip[vertex]

        # Check for the radial placement constraint
        radial_constraints = locate_constraints_of_type(
            vertices, RadialPlacementFromChipConstraint)
        start_x, start_y = self._get_start(radial_constraints)
        chips = None
        if start_x is not None and start_y is not None:
            chips = self._generate_radial_chips(machine, resource_tracker,
                                                start_x, start_y)

        if len(vertices) > 1:
            assigned_values = \
                resource_tracker.allocate_constrained_group_resources([
                    (vert.resources_required, vert.constraints)
                    for vert in vertices
                ], chips)
            for (x, y, p, _, _), vert in zip(assigned_values, vertices):
                placement = Placement(vert, x, y, p)
                placements.add_placement(placement)
        else:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, chips)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)

        return vertices
Example No. 4
def test_ner_route_default():
    unittest_setup()
    graph = MachineGraph("Test")
    machine = virtual_machine(8, 8)
    placements = Placements()

    source_vertex = SimpleMachineVertex(None)
    graph.add_vertex(source_vertex)
    placements.add_placement(Placement(source_vertex, 0, 0, 1))
    target_vertex = SimpleMachineVertex(None)
    graph.add_vertex(target_vertex)
    placements.add_placement(Placement(target_vertex, 0, 2, 1))
    edge = MachineEdge(source_vertex, target_vertex)
    graph.add_edge(edge, "Test")
    partition = graph.get_outgoing_partition_for_edge(edge)

    routes = ner_route(graph, machine, placements)

    source_route = routes.get_entries_for_router(0, 0)[partition]
    assert (not source_route.defaultable)
    mid_route = routes.get_entries_for_router(0, 1)[partition]
    print(mid_route.incoming_link, mid_route.link_ids)
    assert (mid_route.defaultable)
    end_route = routes.get_entries_for_router(0, 2)[partition]
    assert (not end_route.defaultable)
Example No. 5
    def test_listener_creation(self):
        # Test of a buffer manager listener creation problem, where multiple
        # listeners were being created for the buffer manager traffic from
        # individual boards, when it is preferred that all traffic is
        # received by a single listener

        # Create two vertices
        v1 = _TestVertex(10, "v1", 256)
        v2 = _TestVertex(10, "v2", 256)

        # Create two tags - important thing is port=None
        t1 = IPTag(board_address='127.0.0.1', destination_x=0,
                   destination_y=1, tag=1, port=None, ip_address=None,
                   strip_sdp=True, traffic_identifier='BufferTraffic')
        t2 = IPTag(board_address='127.0.0.1', destination_x=0,
                   destination_y=2, tag=1, port=None, ip_address=None,
                   strip_sdp=True, traffic_identifier='BufferTraffic')

        # Create 'Tags' object and add tags
        t = Tags()
        t.add_ip_tag(t1, v1)
        t.add_ip_tag(t2, v2)

        # Create board connections
        connections = []
        connections.append(SCAMPConnection(
            remote_host=None))
        connections.append(EIEIOConnection())

        # Create two placements and 'Placements' object
        pl1 = Placement(v1, 0, 1, 1)
        pl2 = Placement(v2, 0, 2, 1)
        pl = Placements([pl1, pl2])

        # Create transceiver
        trnx = Transceiver(version=5, connections=connections)
        # Alternatively, one can register a udp listener for testing via:
        # trnx.register_udp_listener(callback=None,
        #        connection_class=EIEIOConnection)

        # Create buffer manager
        bm = BufferManager(pl, t, trnx)

        # Register two listeners, and check the second listener uses the
        # first rather than creating a new one
        bm._add_buffer_listeners(vertex=v1)
        bm._add_buffer_listeners(vertex=v2)

        number_of_listeners = 0
        for i in bm._transceiver._udp_listenable_connections_by_class[
                EIEIOConnection]:
            # Check if listener is registered on connection - we only expect
            # one listener to be registered, as all connections can use the
            # same listener for the buffer manager
            if i[1] is not None:
                number_of_listeners += 1
            print(i)
        self.assertEqual(number_of_listeners, 1)
Example No. 6
    def test_with_application_vertices(self):
        """ Test that an application vertex's data is rewritten correctly
        """
        # Create a default SDRAM to set the max to default
        SDRAM()
        reload_region_data = [(0, [0] * 10), (1, [1] * 20)]
        vertex = _TestApplicationVertex(10, reload_region_data)
        m_slice_1 = Slice(0, 4)
        m_slice_2 = Slice(5, 9)
        m_vertex_1 = vertex.create_machine_vertex(m_slice_1, None, None, None)
        m_vertex_2 = vertex.create_machine_vertex(m_slice_2, None, None, None)

        graph_mapper = GraphMapper()
        graph_mapper.add_vertex_mapping(m_vertex_1, m_slice_1, vertex)
        graph_mapper.add_vertex_mapping(m_vertex_2, m_slice_2, vertex)

        placements = Placements(
            [Placement(m_vertex_1, 0, 0, 1),
             Placement(m_vertex_2, 0, 0, 2)])

        user_0_addresses = {
            (placement.x, placement.y, placement.p): i * 1000
            for i, placement in enumerate(placements.placements)
        }
        region_addresses = [i for i in range(MAX_MEM_REGIONS)]
        transceiver = _MockTransceiver(user_0_addresses, region_addresses)

        reloader = DSGRegionReloader()
        reloader.__call__(transceiver, placements, "localhost", "test", False,
                          "test", graph_mapper)

        regions_rewritten = transceiver.regions_rewritten

        # Check that the number of times the data has been regenerated is
        # correct
        self.assertEqual(vertex.regenerate_call_count, placements.n_placements)

        # Check that the number of regions rewritten is correct
        self.assertEqual(len(transceiver.regions_rewritten),
                         placements.n_placements * len(reload_region_data))

        # Check that the data rewritten is correct
        for i, placement in enumerate(placements.placements):
            user_0_address = user_0_addresses[placement.x, placement.y,
                                              placement.p]
            for j in range(len(reload_region_data)):
                pos = (i * len(reload_region_data)) + j
                region, data = reload_region_data[j]
                address = get_region_base_address_offset(
                    user_0_address, 0) + region_addresses[region]
                data = bytearray(numpy.array(data, dtype="uint32").tobytes())

                # Check that the base address and data written is correct
                self.assertEqual(regions_rewritten[pos], (address, data))

        # Delete data files
        shutil.rmtree("test")
Example No. 7
    def test_too_many_ip_tags_for_1_board(self):
        n_extra_vertices = 3
        machine = virtual_machine(12, 12)
        eth_chips = machine.ethernet_connected_chips
        eth_chip = eth_chips[0]
        eth_chip_2 = machine.get_chip_at(eth_chip.x + 1, eth_chip.y + 1)
        eth_procs = [
            proc.processor_id for proc in eth_chip.processors
            if not proc.is_monitor
        ]
        procs = [proc for proc in eth_chip_2.processors if not proc.is_monitor]
        eth2_procs = [proc.processor_id for proc in procs]
        proc = procs[-1]
        eth_vertices = [
            SimpleMachineVertex(ResourceContainer(
                iptags=[IPtagResource("127.0.0.1", port=tag, strip_sdp=True)]),
                                label="Ethernet Vertex {}".format(proc))
            for tag in eth_chip.tag_ids
        ]
        eth2_vertices = [
            SimpleMachineVertex(ResourceContainer(iptags=[
                IPtagResource("127.0.0.1", port=10000 + tag, strip_sdp=True)
            ]),
                                label="Ethernet 2 Vertex {}".format(proc))
            for tag in range(n_extra_vertices)
        ]
        placements = Placements(
            Placement(vertex, eth_chip.x, eth_chip.y, proc)
            for proc, vertex in zip(eth_procs, eth_vertices))
        placements.add_placements(
            Placement(vertex, eth_chip_2.x, eth_chip_2.y, proc)
            for proc, vertex in zip(eth2_procs, eth2_vertices))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(machine,
                               plan_n_timesteps=None,
                               placements=placements)

        tags_by_board = defaultdict(set)
        for vertices in (eth_vertices, eth2_vertices):
            for vertex in vertices:
                iptags = tags.get_ip_tags_for_vertex(vertex)
                self.assertEqual(len(iptags), 1,
                                 "Incorrect number of tags assigned")
                placement = placements.get_placement_of_vertex(vertex)
                print(placement, "has tag", iptags[0])
                self.assertFalse(
                    iptags[0].tag in tags_by_board[iptags[0].board_address],
                    "Tag used more than once")
                tags_by_board[iptags[0].board_address].add(iptags[0].tag)

        self.assertEqual(len(tags_by_board[eth_chip.ip_address]),
                         len(eth_chip.tag_ids),
                         "Wrong number of tags assigned to first Ethernet")
Example No. 8
def convert_from_rig_placements(rig_placements, rig_allocations,
                                machine_graph):
    placements = Placements()
    for vertex in rig_placements:
        if isinstance(vertex, AbstractVirtualVertex):
            placements.add_placement(
                Placement(vertex, vertex.virtual_chip_x, vertex.virtual_chip_y,
                          None))
        else:
            x, y = rig_placements[vertex]
            p = rig_allocations[vertex]["cores"].start
            placements.add_placement(Placement(vertex, x, y, p))

    return placements
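A minimal usage sketch for the converter above, with the input shapes inferred from its body: rig_placements maps each machine vertex to an (x, y) chip coordinate, rig_allocations maps each vertex to a dict whose "cores" entry is a range-like object whose start is the allocated processor, and machine_graph is not used for ordinary vertices, so None is passed here purely for illustration (import path assumed from the usual PACMAN layout).

from pacman.model.graphs.machine import SimpleMachineVertex  # assumed import path

# One ordinary (non-virtual) vertex, expected on chip (1, 2), core 3
vertex = SimpleMachineVertex(None)
rig_placements = {vertex: (1, 2)}
rig_allocations = {vertex: {"cores": range(3, 4)}}

# convert_from_rig_placements as defined above
placements = convert_from_rig_placements(
    rig_placements, rig_allocations, machine_graph=None)
assert placements.get_placement_of_vertex(vertex).p == 3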
Example No. 9
    def __call__(self, machine_graph, machine):
        """ Place a machine_graph so that each vertex is placed on a core

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine: The machine on which to place the machine_graph
        :type machine: :py:class:`~spinn_machine.Machine`
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(machine)
        for vertex in progress.over(vertices):
            # Create and store a new placement anywhere on the board
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, None)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)
        return placements
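The listing shows only the placer's __call__, not the class that owns it. The sketch below restates the same loop as a free-standing helper and drives it against a small virtual machine; it is a hedged illustration built from the ResourceTracker calls shown above, with import paths assumed from the usual PACMAN and SpiNNMachine layouts.

from pacman.model.graphs.machine import MachineGraph, SimpleMachineVertex
from pacman.model.placements import Placement, Placements
from pacman.model.resources import ResourceContainer
from pacman.utilities.utility_objs import ResourceTracker
from spinn_machine import virtual_machine


def place_one_vertex_per_core(machine_graph, machine):
    # Same allocation pattern as the __call__ above, without the progress bar
    placements = Placements()
    tracker = ResourceTracker(machine)
    for vertex in machine_graph.vertices:
        x, y, p, _, _ = tracker.allocate_constrained_resources(
            vertex.resources_required, vertex.constraints, None)
        placements.add_placement(Placement(vertex, x, y, p))
    return placements


machine = virtual_machine(8, 8)
graph = MachineGraph("Test")
graph.add_vertex(SimpleMachineVertex(ResourceContainer(), label="v0"))
print(place_one_vertex_per_core(graph, machine).n_placements)  # expect 1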
Example No. 10
    def test_ip_tags(self):
        machine = virtual_machine(12, 12)
        eth_chips = machine.ethernet_connected_chips
        vertices = [
            SimpleMachineVertex(ResourceContainer(
                iptags=[IPtagResource("127.0.0.1", port=None, strip_sdp=True)
                        ]),
                                label="Vertex {}".format(i))
            for i in range(len(eth_chips))
        ]
        print("Created {} vertices".format(len(vertices)))
        placements = Placements(
            Placement(vertex, chip.x, chip.y, 1)
            for vertex, chip in zip(vertices, eth_chips))
        allocator = BasicTagAllocator()
        _, _, tags = allocator(machine,
                               plan_n_timesteps=None,
                               placements=placements)

        for vertex, chip in zip(vertices, eth_chips):
            iptags = tags.get_ip_tags_for_vertex(vertex)
            self.assertEqual(len(iptags), 1,
                             "Incorrect number of tags assigned")
            self.assertEqual(iptags[0].destination_x, chip.x,
                             "Destination of tag incorrect")
            self.assertEqual(iptags[0].destination_y, chip.y,
                             "Destination of tag incorrect")
            placement = placements.get_placement_of_vertex(vertex)
            print(placement, "has tag", iptags[0])
Example No. 11
    def test_routing(self):
        graph = MachineGraph("Test")
        machine = VirtualMachine(2, 2)
        placements = Placements()
        vertices = list()

        for x in range(machine.max_chip_x + 1):
            for y in range(machine.max_chip_y + 1):
                chip = machine.get_chip_at(x, y)
                if chip is not None:
                    for processor in chip.processors:
                        if not processor.is_monitor:
                            vertex = SimpleMachineVertex(
                                resources=ResourceContainer())
                            graph.add_vertex(vertex)
                            placements.add_placement(
                                Placement(vertex, x, y,
                                          processor.processor_id))
                            vertices.append(vertex)

        for vertex in vertices:
            for vertex_to in vertices:
                if vertex != vertex_to:
                    graph.add_edge(MachineEdge(vertex, vertex_to), "Test")

        router = BasicDijkstraRouting()
        routing_paths = router.__call__(placements, machine, graph)

        for vertex in vertices:
            vertices_reached = set()
            queue = deque()
            seen_entries = set()
            placement = placements.get_placement_of_vertex(vertex)
            partition = graph.get_outgoing_edge_partition_starting_at_vertex(
                vertex, "Test")
            entry = routing_paths.get_entry_on_coords_for_edge(
                partition, placement.x, placement.y)
            self.assertEqual(entry.incoming_processor, placement.p)
            queue.append((placement.x, placement.y))
            while len(queue) > 0:
                x, y = queue.pop()
                entry = routing_paths.get_entry_on_coords_for_edge(
                    partition, x, y)
                self.assertIsNotNone(entry)
                chip = machine.get_chip_at(x, y)
                for p in entry.out_going_processors:
                    self.assertIsNotNone(chip.get_processor_with_id(p))
                    vertex_found = placements.get_vertex_on_processor(x, y, p)
                    vertices_reached.add(vertex_found)
                seen_entries.add((x, y))
                for link_id in entry.out_going_links:
                    link = chip.router.get_link(link_id)
                    self.assertIsNotNone(link)
                    dest_x, dest_y = link.destination_x, link.destination_y
                    if (dest_x, dest_y) not in seen_entries:
                        queue.append((dest_x, dest_y))

            for vertex_to in vertices:
                if vertex != vertex_to:
                    self.assertIn(vertex_to, vertices_reached)
Example No. 12
    def test_create_new_placements(self):
        """
        test creating a placements object
        """
        subv = SimpleMachineVertex(None, "")
        pl = Placement(subv, 0, 0, 1)
        Placements([pl])
Example No. 13
    def _allocate_one_to_one_group(
            resource_tracker, vertices, progress, placements, chips,
            all_vertices_placed, machine_graph):
        """
        :param ResourceTracker resource_tracker:
        :param list(MachineVertex) vertices:
        :param ~spinn_utilities.progress_bar.ProgressBar progress:
        :param Placements placements:
        :param chips:
        :type chips: iterable(tuple(int, int)) or None
        :param MachineGraph machine_graph: machine graph
        :param set(MachineVertex) all_vertices_placed:
        :rtype: bool
        """
        try:
            allocs = resource_tracker.allocate_constrained_group_resources(
                create_requirement_collections(vertices, machine_graph),
                chips)

            # allocate cores to vertices
            for vertex, (x, y, p, _, _) in progress.over(
                    zip(vertices, allocs), False):
                placements.add_placement(Placement(vertex, x, y, p))
                all_vertices_placed.add(vertex)
            return True
        except (PacmanValueError, PacmanException,
                PacmanInvalidParameterException):
            return False
Example No. 14
    def _allocate_individual(vertex, placements, tracker,
                             same_chip_vertex_groups, all_vertices_placed):
        if vertex not in all_vertices_placed:
            vertices = same_chip_vertex_groups[vertex]

            if len(vertices) > 1:
                resources = tracker.allocate_constrained_group_resources([
                    (v.resources_required, v.constraints) for v in vertices
                ])
                for (x, y, p, _, _), v in zip(resources, vertices):
                    placements.add_placement(Placement(v, x, y, p))
                    all_vertices_placed.add(v)
            else:
                (x, y, p, _, _) = tracker.\
                    allocate_constrained_resources(
                        vertex.resources_required, vertex.constraints)
                placements.add_placement(Placement(vertex, x, y, p))
                all_vertices_placed.add(vertex)
Example No. 15
    def test_retrieve_direct_block(self):
        default_config_paths = os.path.join(
            os.path.dirname(abstract_spinnaker_common.__file__),
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME)

        config = conf_loader.load_config(
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)

        key = 0
        n_rows = 2

        direct_matrix = bytearray(struct.pack("<IIII", 1, 2, 3, 4))
        direct_matrix_1_expanded = bytearray(
            struct.pack("<IIIIIIII", 0, 1, 0, 1, 0, 1, 0, 2))
        direct_matrix_2_expanded = bytearray(
            struct.pack("<IIIIIIII", 0, 1, 0, 3, 0, 1, 0, 4))

        synaptic_manager = SynapticManager(
            synapse_type=None,
            ring_buffer_sigma=5.0,
            spikes_per_second=100.0,
            config=config,
            population_table_type=MockMasterPopulationTable(
                {key: [(1, 0, True), (1, n_rows * 4, True)]}),
            synapse_io=MockSynapseIO())

        transceiver = MockTransceiverRawData(direct_matrix)
        placement = Placement(None, 0, 0, 1)

        data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver,
            placement=placement,
            master_pop_table_address=0,
            indirect_synapses_address=0,
            direct_synapses_address=0,
            key=key,
            n_rows=n_rows,
            index=0,
            using_extra_monitor_cores=False)
        data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver,
            placement=placement,
            master_pop_table_address=0,
            indirect_synapses_address=0,
            direct_synapses_address=0,
            key=key,
            n_rows=n_rows,
            index=1,
            using_extra_monitor_cores=False)

        # Row lengths should be 1
        assert row_len_1 == 1
        assert row_len_2 == 1

        # Check the data retrieved
        assert data_1 == direct_matrix_1_expanded
        assert data_2 == direct_matrix_2_expanded
Example No. 16
    def test_routing(self):
        graph = MachineGraph("Test")
        set_config("Machine", "down_chips", "1,2:5,4:3,3")
        machine = virtual_machine(8, 8)
        placements = Placements()
        vertices = list()

        for chip in machine.chips:
            for processor in chip.processors:
                if not processor.is_monitor:
                    vertex = SimpleMachineVertex(resources=ResourceContainer())
                    graph.add_vertex(vertex)
                    placements.add_placement(
                        Placement(vertex, chip.x, chip.y,
                                  processor.processor_id))
                    vertices.append(vertex)

        for vertex in vertices:
            graph.add_outgoing_edge_partition(
                MulticastEdgePartition(identifier="Test", pre_vertex=vertex))
            for vertex_to in vertices:
                graph.add_edge(MachineEdge(vertex, vertex_to), "Test")

        routing_paths = ner_route_traffic_aware(graph, machine, placements)

        for vertex in vertices:
            vertices_reached = set()
            queue = deque()
            seen_entries = set()
            placement = placements.get_placement_of_vertex(vertex)
            partition = graph.get_outgoing_edge_partition_starting_at_vertex(
                vertex, "Test")
            entry = routing_paths.get_entry_on_coords_for_edge(
                partition, placement.x, placement.y)
            self.assertEqual(entry.incoming_processor, placement.p)
            queue.append((placement.x, placement.y))
            while len(queue) > 0:
                x, y = queue.pop()
                entry = routing_paths.get_entry_on_coords_for_edge(
                    partition, x, y)
                self.assertIsNotNone(entry)
                chip = machine.get_chip_at(x, y)
                for p in entry.processor_ids:
                    self.assertIsNotNone(chip.get_processor_with_id(p))
                    vertex_found = placements.get_vertex_on_processor(x, y, p)
                    vertices_reached.add(vertex_found)
                seen_entries.add((x, y))
                for link_id in entry.link_ids:
                    link = chip.router.get_link(link_id)
                    self.assertIsNotNone(link)
                    dest_x, dest_y = link.destination_x, link.destination_y
                    if (dest_x, dest_y) not in seen_entries:
                        queue.append((dest_x, dest_y))

            for vertex_to in vertices:
                self.assertIn(vertex_to, vertices_reached)
Example No. 17
    def test_router_with_one_hop_route_all_default_link_5(self):
        self.placements = Placements()
        self.placement1 = Placement(x=0, y=2, p=2, vertex=self.vertex1)
        self.placement2 = Placement(x=0, y=0, p=2, vertex=self.vertex2)
        self.placements.add_placement(self.placement1)
        self.placements.add_placement(self.placement2)
        # sort out routing infos
        self.routing_info = RoutingInfo()
        self.edge_routing_info1 = PartitionRoutingInfo(
            key=2 << 11, mask=DEFAULT_MASK, edge=self.edge)
        self.routing_info.add_partition_info(self.edge_routing_info1)
        # create machine
        self.machine = VirtualMachine(10, 10, False)
        self.routing = BasicDijkstraRouting()
        self.routing.route(
            machine=self.machine, placements=self.placements,
            machine_graph=self.graph,
            routing_info_allocation=self.routing_info)
Example No. 18
    def test_create_new_placements_duplicate_vertex(self):
        """
        check that you can't put a vertex in multiple placements
        """
        subv = SimpleMachineVertex(None, "")
        pl = list()
        for i in range(4):
            pl.append(Placement(subv, 0, 0, i))
        with self.assertRaises(PacmanAlreadyPlacedError):
            Placements(pl)
Example No. 19
    def test_create_new_placement(self):
        """
        test that creating a new placement puts stuff in the right place
        """
        subv = SimpleMachineVertex(None, "")
        pl = Placement(subv, 0, 0, 1)
        self.assertEqual(pl.x, 0)
        self.assertEqual(pl.y, 0)
        self.assertEqual(pl.p, 1)
        self.assertEqual(subv, pl.vertex)
Example No. 20
    def _place_same_chip_verts(self, same_chip_vertex_groups, chips_in_order,
                               placements, progress_bar, resource_tracker,
                               placed_vertices, cost_per_chip, machine_graph,
                               n_keys_map):
        """ places verts which have to be on the same chip on minimum chip.

        :param same_chip_vertex_groups:
            groups of verts which want to be on the same chip.
        :type same_chip_vertex_groups: dict(MachineVertex, set(MachineVertex))
        :param chips_in_order: chips in radial order from mid machine
        :type chips_in_order: iterable(tuple(int,int))
        :param Placements placements: placements holder
        :param ~spinn_utilities.progress_bar.ProgressBar progress_bar:
            progress bar
        :param ResourceTracker resource_tracker: resource tracker
        :param set(MachineVertex) placed_vertices:
            vertices which have already been placed
        :param cost_per_chip: map between (x,y) and the cost of packets
        :type cost_per_chip: dict(tuple(int, int), int)
        :param MachineGraph machine_graph:
        :param AbstractMachinePartitionNKeysMap n_keys_map:
        :rtype: None
        """
        for vertex in same_chip_vertex_groups.keys():
            if len(same_chip_vertex_groups[vertex]) != 1:
                if vertex not in placed_vertices:
                    to_do_as_group = list()
                    for other_vert in same_chip_vertex_groups[vertex]:
                        if other_vert not in placed_vertices:
                            to_do_as_group.extend(
                                create_requirement_collections([other_vert],
                                                               machine_graph))

                    # allocate as a group to sorted chips so that ones with
                    # least incoming packets are considered first
                    results = \
                        resource_tracker.allocate_constrained_group_resources(
                            to_do_as_group, chips=chips_in_order)

                    # create placements and add cost to the chip
                    for (x, y, p, _, _), placed_vertex in zip(
                            results, same_chip_vertex_groups[vertex]):
                        placements.add_placement(
                            Placement(placed_vertex, x, y, p))
                        placed_vertices.add(placed_vertex)
                        cost_per_chip[x, y] += self._get_cost(
                            placed_vertex, machine_graph, n_keys_map)

                # re-sort the chips, as there is no way to know where in the
                # list the resource tracker made its selections
                chips_in_order = self._sort_chips_based_off_incoming_cost(
                    chips_in_order, cost_per_chip)

        # update progress bar to cover one cycle of all the verts in the graph
        progress_bar.update(len(machine_graph.vertices))
Example No. 21
def test_convert_to_file_placement(tmpdir):
    v = SimpleMachineVertex(ResourceContainer())
    pl = Placement(v, 1, 2, 3)
    placements = Placements([pl])
    algo = ConvertToFilePlacement()
    fn = tmpdir.join("foo.json")
    filename, _vertex_by_id = algo(placements, str(fn))
    assert filename == str(fn)
    obj = json.loads(fn.read())
    baseline = {ident(v): [1, 2]}
    assert obj == baseline
Example No. 22
def test_convert_to_file_core_allocations(tmpdir):
    algo = ConvertToFileCoreAllocations()
    fn = tmpdir.join("foo.json")
    algo([], str(fn))
    assert fn.read() == '{"type": "cores"}'

    v = SimpleMachineVertex(ResourceContainer())
    pl = Placement(v, 1, 2, 3)
    filename, _ = algo([pl], str(fn))
    assert filename == str(fn)
    assert fn.read() == '{"type": "cores", "%s": [3, 4]}' % ident(v)
Example No. 23
    def test_mixed_binaries(self):
        """ Test calling the binary gatherer with mixed executable types
        """

        vertex_1 = _TestVertexWithBinary("test.aplx", ExecutableType.RUNNING)
        vertex_2 = _TestVertexWithBinary("test2.aplx", ExecutableType.SYNC)

        placements = Placements(placements=[
            Placement(vertex_1, 0, 0, 0),
            Placement(vertex_2, 0, 0, 1)
        ])

        graph = MachineGraph("Test")
        graph.add_vertices([vertex_1, vertex_2])

        gatherer = LocateExecutableStartType()
        results = gatherer.__call__(graph, placements=placements)
        self.assertIn(ExecutableType.RUNNING, results)
        self.assertIn(ExecutableType.SYNC, results)
        self.assertNotIn(ExecutableType.USES_SIMULATION_INTERFACE, results)
        self.assertNotIn(ExecutableType.NO_APPLICATION, results)
Example No. 24
    def _place_vertex(self, vertex, resource_tracker, machine, placements,
                      location):
        vertices = location[vertex]
        # random x and y values within the bounds of the machine
        chips = self._generate_random_chips(machine)

        if len(vertices) > 1:
            assigned_values = \
                resource_tracker.allocate_constrained_group_resources([
                    (vert.resources_required, vert.constraints)
                    for vert in vertices], chips)
            for (x, y, p, _, _), vert in zip(assigned_values, vertices):
                placement = Placement(vert, x, y, p)
                placements.add_placement(placement)
        else:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, chips)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)

        return vertices
Example No. 25
    def _place_vertex(self, vertex, resource_tracker, machine, placements,
                      vertices_on_same_chip, machine_graph):
        """
        :param MachineVertex vertex:
        :param ResourceTracker resource_tracker:
        :param ~spinn_machine.Machine machine:
        :param Placements placements:
        :param vertices_on_same_chip:
        :type vertices_on_same_chip: dict(MachineVertex, set(MachineVertex))
        :param MachineGraph machine_graph:
        :rtype: set(MachineVertex)
        """
        vertices = vertices_on_same_chip[vertex]

        # Check for the radial placement constraint
        radial_constraints = [
            c for v in vertices for c in v.constraints
            if isinstance(c, RadialPlacementFromChipConstraint)
        ]
        start_x, start_y = self._get_start(radial_constraints)
        chips = None
        if start_x is not None and start_y is not None:
            chips = self._generate_radial_chips(machine, resource_tracker,
                                                start_x, start_y)

        if len(vertices) > 1:
            assigned_values = \
                resource_tracker.allocate_constrained_group_resources(
                    create_requirement_collections(vertices, machine_graph),
                    chips=chips)
            for (x, y, p, _, _), vert in zip(assigned_values, vertices):
                placement = Placement(vert, x, y, p)
                placements.add_placement(placement)
        else:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, chips=chips)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)

        return vertices
Example No. 26
    def __call__(self, extended_machine, placements, allocations, constraints,
                 vertex_by_id):
        """
        :param placements:
        :param allocations:
        :param extended_machine:
        :param constraints:
        :param vertex_by_id:
        """

        # load the json files
        file_placements, core_allocations, constraints = \
            self._load_json_files(placements, allocations, constraints)

        # validate the json files against the schemas
        self._validate_file_read_data(file_placements, core_allocations,
                                      constraints)

        memory_placements = Placements()

        # process placements
        for vertex_id in file_placements:
            if str(vertex_id) not in vertex_by_id:
                if text_type(vertex_id) not in core_allocations:
                    raise PacmanConfigurationException(
                        "I don't recognise this pattern of constraints for"
                        " a vertex which does not have a placement")
                else:
                    raise PacmanConfigurationException(
                        "Failed to locate the vertex in the "
                        "graph with id {}".format(vertex_id))

            if text_type(vertex_id) in core_allocations:
                memory_placements.add_placement(
                    Placement(x=file_placements[vertex_id][0],
                              y=file_placements[vertex_id][1],
                              p=core_allocations[vertex_id][0],
                              vertex=vertex_by_id[str(vertex_id)]))
            else:
                # virtual chip or tag chip
                external_device_constraints = \
                    self._valid_constraints_for_external_device(
                        self._locate_constraints(vertex_id, constraints))
                if external_device_constraints:
                    placements.add(
                        self._make_virtual_placement(
                            extended_machine, vertex_by_id[str(vertex_id)],
                            external_device_constraints))

        # return the file format
        return memory_placements
Example No. 27
    def _place_vertex(self, vertex, resource_tracker, machine, placements,
                      vertices_on_same_chip):
        """ Creates placements and returns list of vertices placed.

        :param MachineVertex vertex: the vertex that is placed
        :param ResourceTracker resource_tracker:
            tracks the usage of resources of a machine
        :param ~spinn_machine.Machine machine: A SpiNNaker machine object.
        :param Placements placements: Placements of vertices on the machine
        :param vertices_on_same_chip: a dictionary where keys are a vertex
            and values are a list of vertices
        :type vertices_on_same_chip: dict(MachineVertex,list(MachineVertex))
        :return: the list of vertices that were placed
        :rtype: list(MachineVertex)
        """

        vertices = vertices_on_same_chip[vertex]
        chips = self._generate_hilbert_chips(machine)

        # prioritize vertices that should be on the same chip
        if len(vertices) > 1:
            assigned_values = \
                resource_tracker.allocate_constrained_group_resources([
                    (vert.resources_required, vert.constraints)
                    for vert in vertices
                ], chips)
            for (x, y, p, _, _), vert in zip(assigned_values, vertices):
                placement = Placement(vert, x, y, p)
                placements.add_placement(placement)
        else:
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, chips)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)

        # returns list of vertices placed
        return vertices
Example No. 28
    def test_get_placement_of_vertex(self):
        """
        checks the Placements get_placement_of_vertex method
        """
        subv = list()
        for i in range(5):
            subv.append(SimpleMachineVertex(None, ""))

        pl = list()
        for i in range(4):
            pl.append(Placement(subv[i], 0, 0, i))

        pls = Placements(pl)
        for i in range(4):
            self.assertEqual(pls.get_placement_of_vertex(subv[i]), pl[i])
Example No. 29
    def _make_virtual_placement(machine, vertex, constraints):
        # get data for virtual chip
        route_constraint = constraints['end_point']
        route_direction = EDGES(route_constraint['direction'].upper())
        placement_constraint = constraints['placement']
        coords = placement_constraint['location']

        # locate virtual chip
        link = machine.get_chip_at(coords[0], coords[1]).router.get_link(
            route_direction.value)
        destination_chip = machine.get_chip_at(link.destination_x,
                                               link.destination_y)

        # create placement
        return Placement(vertex, destination_chip.x, destination_chip.y, None)
Example No. 30
    def test_get_placements(self):
        """
        tests the placements iterator functionality.
        """
        subv = list()
        for i in range(5):
            subv.append(SimpleMachineVertex(None, ""))

        pl = list()
        for i in range(4):
            pl.append(Placement(subv[i], 0, 0, i))

        pls = Placements(pl)
        container = pls.placements
        for i in range(4):
            self.assertIn(pl[i], container)
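Taken together, these examples exercise a small query surface on Placements. Below is a consolidated sketch of that surface, restricted to the calls that appear in the listing above (import paths assumed from the usual PACMAN layout).

from pacman.model.graphs.machine import SimpleMachineVertex  # assumed import path
from pacman.model.placements import Placement, Placements    # assumed import path

v1 = SimpleMachineVertex(None, "v1")
v2 = SimpleMachineVertex(None, "v2")

placements = Placements([Placement(v1, 0, 0, 1)])   # build from an initial list
placements.add_placement(Placement(v2, 0, 0, 2))    # or add placements later

print(placements.n_placements)                       # 2
print(placements.get_placement_of_vertex(v1).p)      # 1
print(placements.get_vertex_on_processor(0, 0, 2) is v2)   # True
for placement in placements.placements:              # iterate over all placements
    print(placement.x, placement.y, placement.p)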