def test_sdram_links():
    """ Test sdram edges which should explode """
    # Build a graph of 20 vertices, each carrying an SDRAM cost
    graph = MachineGraph("Test")
    vertices = []
    for index in range(20):
        node = SimpleMachineVertex(
            resources=ResourceContainer(),
            label="Vertex_{}".format(index), sdram_cost=20)
        graph.add_vertex(node)
        vertices.append(node)
    tail = vertices[-1]

    # Give every vertex its own SDRAM partition and an edge to the tail
    for source in graph.vertices:
        graph.add_outgoing_edge_partition(
            ConstantSDRAMMachinePartition(
                identifier="SDRAM", pre_vertex=source, label="bacon"))
        graph.add_edge(
            SDRAMMachineEdge(source, tail, "bacon", app_edge=None), "SDRAM")

    key_map = DictBasedMachinePartitionNKeysMap()
    target = virtual_machine(width=8, height=8)

    # Placement over this graph is expected to fail with a PacmanException;
    # reaching the raise below means it wrongly succeeded
    try:
        SpreaderPlacer()(graph, target, key_map, plan_n_timesteps=1000)
        raise Exception("should blow up here")
    except PacmanException:
        pass
class TestDataSpecificationTargets(unittest.TestCase):
    # A small virtual machine shared by every test in this case
    machine = virtual_machine(2, 2)

    def test_dict(self):
        """ Check dict-style access to DataSpecificationTargets. """
        expected = dict()
        workdir = tempfile.mkdtemp()
        print(workdir)
        targets = DataSpecificationTargets(self.machine, workdir)

        # Write a data spec for two cores and mirror each in a plain dict
        for core, payload in (((0, 0, 0), bytearray(b"foo")),
                              ((0, 1, 2), bytearray(b"bar"))):
            with targets.create_data_spec(*core) as writer:
                writer.write(payload)
            expected[core] = DataRowReader(payload)
            self.assertEqual(expected[core], targets[core])

        self.assertEqual(2, len(targets))

        targets.set_app_id(12)
        for core in targets:
            self.assertEqual(expected[core], targets[core])
            self.assertEqual(12, targets.get_app_id(*core))

        for core, reader in iteritems(targets):
            self.assertEqual(expected[core], reader)
def test_dict(self):
    """ Check DsWriteInfo behaves like a mapping from core to info. """
    expected = dict()
    machine = virtual_machine(2, 2)
    workdir = tempfile.mkdtemp()
    targets = DataSpecificationTargets(machine, workdir)
    print(workdir)
    info_map = DsWriteInfo(targets.get_database())

    # Store a record for two cores and mirror each in a plain dict
    for core, info in (((0, 0, 0), DataWritten(123, 12, 23)),
                       ((1, 1, 3), DataWritten(456, 45, 56))):
        info_map.set_info(*core, info=info)
        expected[core] = info
        self.assertEqual(info, info_map.get_info(*core))

    self.assertEqual(2, len(info_map))

    for core in info_map:
        self.assertEqual(expected[core], info_map.get_info(*core))

    for core, value in iteritems(info_map):
        self.assertEqual(expected[core], value)
def test_routing(self):
    """ Place one vertex on every non-monitor core of a 2x2 virtual
        machine, connect them all-to-all, route, and then walk the
        generated routing entries to check every vertex can reach every
        other vertex.
    """
    graph = MachineGraph("Test")
    machine = virtual_machine(2, 2)
    placements = Placements()
    vertices = list()

    # One vertex per application (non-monitor) core, placed on that core
    for chip in machine.chips:
        for processor in chip.processors:
            if not processor.is_monitor:
                vertex = SimpleMachineVertex(resources=ResourceContainer())
                graph.add_vertex(vertex)
                placements.add_placement(
                    Placement(vertex, chip.x, chip.y,
                              processor.processor_id))
                vertices.append(vertex)

    # Full connectivity: one multicast partition per source vertex, with
    # an edge to every other vertex
    for vertex in vertices:
        graph.add_outgoing_edge_partition(
            MulticastEdgePartition(identifier="Test", pre_vertex=vertex))
        for vertex_to in vertices:
            if vertex != vertex_to:
                graph.add_edge(MachineEdge(vertex, vertex_to), "Test")

    router = BasicDijkstraRouting()
    routing_paths = router.__call__(placements, machine, graph)

    # For each source, follow the routing tree from the source chip and
    # collect every vertex reachable via processor-targeted entries
    for vertex in vertices:
        vertices_reached = set()
        queue = deque()
        seen_entries = set()
        placement = placements.get_placement_of_vertex(vertex)
        partition = graph.get_outgoing_edge_partition_starting_at_vertex(
            vertex, "Test")
        entry = routing_paths.get_entry_on_coords_for_edge(
            partition, placement.x, placement.y)
        # The entry on the source chip must come in from the source core
        self.assertEqual(entry.incoming_processor, placement.p)
        queue.append((placement.x, placement.y))
        while len(queue) > 0:
            # NOTE: deque.append + pop() makes this a depth-first walk;
            # traversal order does not matter for the reachability check
            x, y = queue.pop()
            entry = routing_paths.get_entry_on_coords_for_edge(
                partition, x, y)
            self.assertIsNotNone(entry)
            chip = machine.get_chip_at(x, y)
            # Every targeted processor must exist and hosts a placed vertex
            for p in entry.processor_ids:
                self.assertIsNotNone(chip.get_processor_with_id(p))
                vertex_found = placements.get_vertex_on_processor(x, y, p)
                vertices_reached.add(vertex_found)
            seen_entries.add((x, y))
            # Follow each outgoing link to the neighbouring chip, once
            for link_id in entry.link_ids:
                link = chip.router.get_link(link_id)
                self.assertIsNotNone(link)
                dest_x, dest_y = link.destination_x, link.destination_y
                if (dest_x, dest_y) not in seen_entries:
                    queue.append((dest_x, dest_y))
        # All other vertices must have been reached from this source
        for vertex_to in vertices:
            if vertex != vertex_to:
                self.assertIn(vertex_to, vertices_reached)
def test_virtual_vertices_spreader(): """ Test that the placer works with a virtual vertex """ # Create a graph with a virtual vertex machine_graph = MachineGraph("Test") virtual_vertex = MachineSpiNNakerLinkVertex( spinnaker_link_id=0, label="Virtual") machine_graph.add_vertex(virtual_vertex) # These vertices are fixed on 0, 0 misc_vertices = list() for i in range(3): misc_vertex = SimpleMachineVertex( resources=ResourceContainer(), constraints=[ ChipAndCoreConstraint(0, 0)], label="Fixed_0_0_{}".format(i)) machine_graph.add_vertex(misc_vertex) misc_vertices.append(misc_vertex) # These vertices are 1-1 connected to the virtual vertex one_to_one_vertices = list() for i in range(16): one_to_one_vertex = SimpleMachineVertex( resources=ResourceContainer(), label="Vertex_{}".format(i)) machine_graph.add_vertex(one_to_one_vertex) edge = MachineEdge(virtual_vertex, one_to_one_vertex) machine_graph.add_edge(edge, "SPIKES") one_to_one_vertices.append(one_to_one_vertex) n_keys_map = DictBasedMachinePartitionNKeysMap() partition = machine_graph.get_outgoing_edge_partition_starting_at_vertex( virtual_vertex, "SPIKES") n_keys_map.set_n_keys_for_partition(partition, 1) # Get and extend the machine for the virtual chip machine = virtual_machine(width=8, height=8) extended_machine = MallocBasedChipIdAllocator()(machine, machine_graph) # Do placements placements = SpreaderPlacer()( machine_graph, extended_machine, n_keys_map, plan_n_timesteps=1000) # The virtual vertex should be on a virtual chip placement = placements.get_placement_of_vertex(virtual_vertex) assert machine.get_chip_at(placement.x, placement.y).virtual # The 0, 0 vertices should be on 0, 0 for vertex in misc_vertices: placement = placements.get_placement_of_vertex(vertex) assert placement.x == placement.y == 0 # The other vertices should *not* be on a virtual chip for vertex in one_to_one_vertices: placement = placements.get_placement_of_vertex(vertex) assert not machine.get_chip_at(placement.x, placement.y).virtual
def test_one_to_one():
    """ Test normal 1-1 placement
    """
    graph = MachineGraph("Test")

    def _add_chain(labels):
        # Build a chain of vertices joined in sequence by "SPIKES" edges
        chain = []
        for label in labels:
            node = SimpleMachineVertex(
                resources=ResourceContainer(), label=label)
            graph.add_vertex(node)
            if chain:
                graph.add_edge(MachineEdge(chain[-1], node), "SPIKES")
            chain.append(node)
        return chain

    # Ten short chains of three vertices each
    short_chains = [
        _add_chain("Vertex_{}_{}".format(i, j) for j in range(3))
        for i in range(10)]

    # One chain of twenty vertices - too many for a single chip
    long_chain = _add_chain("Vertex_{}".format(i) for i in range(20))

    # Do placements
    placements = OneToOnePlacer()(
        graph, virtual_machine(width=8, height=8), plan_n_timesteps=1000)

    # Each short chain should end up together on a single chip
    for chain in short_chains:
        head = placements.get_placement_of_vertex(chain[0])
        for node in chain[1:]:
            spot = placements.get_placement_of_vertex(node)
            assert spot.x == head.x
            assert spot.y == head.y

    # The long chain should spill over more than one chip
    used_chips = set()
    for node in long_chain:
        spot = placements.get_placement_of_vertex(node)
        used_chips.add((spot.x, spot.y))
    assert len(used_chips) > 1
def test_router_compressor_on_error():
    """ Run on-chip compression against a transceiver that errors.

    NOTE(review): no exception is asserted here, so this presumably just
    checks the call completes without crashing the host - TODO confirm.
    """
    tables = MulticastRoutingTables(
        [UnCompressedMulticastRoutingTable(0, 0)])
    txrx = MockTransceiverError()
    target = virtual_machine(width=8, height=8)
    mundy_on_chip_router_compression(
        tables, txrx, target, app_id=17, system_provenance_folder="")
def test_router_compressor_on_error():
    """ The compressor must raise SpinnFrontEndException when the
        transceiver reports an error.
    """
    compressor = MundyOnChipRouterCompression()
    tables = MulticastRoutingTables([MulticastRoutingTable(0, 0)])
    txrx = MockTransceiverError()
    target = virtual_machine(version=5)
    with pytest.raises(SpinnFrontEndException):
        compressor(
            tables, txrx, target, app_id=17, provenance_file_path="")
def __call__(self, hbp_server_url, total_run_time):
    """ Ask the HBP server for the largest available machine and build a
        matching virtual machine.

    :param hbp_server_url: \
        The URL of the HBP server from which to get the machine
    :param total_run_time: The total run time to request
    """
    description = self._max_machine_request(hbp_server_url, total_run_time)
    width = description["width"]
    height = description["height"]
    # Return the width and height and assume that it has wrap arounds;
    # validation is skipped as the server's answer is trusted
    return virtual_machine(
        width=width, height=height, with_wrap_arounds=None,
        version=None, validate=False)
def __call__(self, spalloc_server, spalloc_port=22244,
             spalloc_machine=None, max_sdram_size=None,
             max_machine_core_reduction=0):
    """ Build a virtual machine sized like the biggest compatible
        machine the spalloc server offers.

    :param str spalloc_server:
    :param int spalloc_port:
    :param str spalloc_machine:
    :param int max_sdram_size:
    :param int max_machine_core_reduction:
    :rtype: ~.Machine
    """
    with ProtocolClient(spalloc_server, spalloc_port) as client:
        machines = client.list_machines()
        # Close the context immediately; don't want to keep this
        # particular connection around as there's not a great chance of
        # this code being rerun in this process any time soon.

    # Track the "biggest" board, i.e. the one covering the most chips
    best_width, best_height, best_area = None, None, -1
    for candidate in self._filter(machines, spalloc_machine):
        # Width and height in chips, and logical area in chips**2
        width, height, area = self._get_size(candidate)
        if area > best_area:
            best_width, best_height, best_area = width, height, area

    if best_width is None:
        raise Exception(
            "The spalloc server appears to have no compatible machines")

    n_cpus_per_chip = (
        Machine.max_cores_per_chip() - max_machine_core_reduction)

    # Make no assumption about wrap-arounds or version
    return virtual_machine(
        width=best_width, height=best_height,
        sdram_per_chip=max_sdram_size,
        n_cpus_per_chip=n_cpus_per_chip, validate=False)
def __call__(self, hbp_server_url, total_run_time,
             max_machine_core_reduction=0):
    """ Build a virtual machine matching the HBP server's largest offer.

    :param str hbp_server_url:
    :param int total_run_time:
    :param int max_machine_core_reduction:
    :rtype: ~.Machine
    """
    offer = self._max_machine_request(hbp_server_url, total_run_time)
    cores = Machine.max_cores_per_chip() - max_machine_core_reduction
    # Return the width and height and assume that it has wrap arounds
    return virtual_machine(
        width=offer["width"], height=offer["height"],
        n_cpus_per_chip=cores, validate=False)
def test_call(self):
    """ Write a small data spec with three regions (one empty), execute
        it on the host against a mock transceiver, and check the base
        addresses and sizes of everything written to the machine.
    """
    executor = HostExecuteDataSpecification()
    # The mock reports user-0 register address 1000 for core (0, 0, 0)
    transceiver = _MockTransceiver(user_0_addresses={0: 1000})
    machine = virtual_machine(2, 2)
    tempdir = tempfile.mkdtemp()
    dsg_targets = DataSpecificationTargets(machine, tempdir)
    with dsg_targets.create_data_spec(0, 0, 0) as spec_writer:
        spec = DataSpecificationGenerator(spec_writer)
        # Three regions of 100 bytes; region 1 is reserved but left empty
        spec.reserve_memory_region(0, 100)
        spec.reserve_memory_region(1, 100, empty=True)
        spec.reserve_memory_region(2, 100)
        # Three words into region 0, one word into region 2
        spec.switch_write_focus(0)
        spec.write_value(0)
        spec.write_value(1)
        spec.write_value(2)
        spec.switch_write_focus(2)
        spec.write_value(3)
        spec.end_specification()
    region_sizes = dict()
    region_sizes[0, 0, 0] = (
        APP_PTR_TABLE_BYTE_SIZE + sum(spec.region_sizes))

    # Execute the spec
    targets = ExecutableTargets()
    targets.add_processor(
        "text.aplx", 0, 0, 0, ExecutableType.USES_SIMULATION_INTERFACE)
    infos = executor.execute_application_data_specs(
        transceiver, machine, 30, dsg_targets, False, targets,
        report_folder=tempdir, region_sizes=region_sizes)

    # Test regions - although 3 are created, only 2 should be uploaded
    # (0 and 2), and only the data written should be uploaded
    # The space between regions should be as allocated regardless of
    # how much data is written
    header_and_table_size = (MAX_MEM_REGIONS + 2) * BYTES_PER_WORD
    regions = transceiver.regions_written
    # Expect: user-0 register, header+table, region 0, region 2
    self.assertEqual(len(regions), 4)
    # Base address for header and table
    self.assertEqual(regions[1][0], 0)
    # Base address for region 0 (after header and table)
    self.assertEqual(regions[2][0], header_and_table_size)
    # Base address for region 2 (two full 100-byte regions later)
    self.assertEqual(regions[3][0], header_and_table_size + 200)
    # User 0 write address (from the mock's user_0_addresses)
    self.assertEqual(regions[0][0], 1000)
    # Size of header and table
    self.assertEqual(len(regions[1][1]), header_and_table_size)
    # Size of region 0: three 4-byte words
    self.assertEqual(len(regions[2][1]), 12)
    # Size of region 2: one 4-byte word
    self.assertEqual(len(regions[3][1]), 4)
    # Size of user 0: a single word (the base address)
    self.assertEqual(len(regions[0][1]), 4)

    # Reported memory accounting for the core
    info = infos[(0, 0, 0)]
    self.assertEqual(info.memory_used, 372)
    self.assertEqual(info.memory_written, 88)
def test_write_data_spec():
    """ Build a two-population network with four projections, write its
        synaptic data to a spec, execute the spec, and check that the
        connections read back from a mock transceiver match what was
        written.
    """
    unittest_setup()
    # UGLY but the mock transceiver NEED generate_on_machine to be False
    AbstractGenerateConnectorOnMachine.generate_on_machine = say_false
    machine = virtual_machine(2, 2)
    p.setup(1.0)
    load_config()
    p.set_number_of_neurons_per_core(p.IF_curr_exp, 100)
    pre_pop = p.Population(10, p.IF_curr_exp(), label="Pre",
                           additional_parameters={
                               "splitter":
                                   SplitterAbstractPopulationVertexSlice()})
    post_pop = p.Population(10, p.IF_curr_exp(), label="Post",
                            additional_parameters={
                                "splitter":
                                    SplitterAbstractPopulationVertexSlice()})
    # Four projections with distinct weights/delays so each synaptic
    # matrix can be identified when read back
    proj_one_to_one_1 = p.Projection(
        pre_pop, post_pop, p.OneToOneConnector(),
        p.StaticSynapse(weight=1.5, delay=1.0))
    proj_one_to_one_2 = p.Projection(
        pre_pop, post_pop, p.OneToOneConnector(),
        p.StaticSynapse(weight=2.5, delay=2.0))
    proj_all_to_all = p.Projection(
        pre_pop, post_pop, p.AllToAllConnector(allow_self_connections=False),
        p.StaticSynapse(weight=4.5, delay=4.0))
    from_list_list = [(i, i, i, (i * 5) + 1) for i in range(10)]
    proj_from_list = p.Projection(
        pre_pop, post_pop, p.FromListConnector(from_list_list),
        p.StaticSynapse())
    app_graph = globals_variables.get_simulator().original_application_graph
    context = {"ApplicationGraph": app_graph}
    # NOTE(review): the rest runs under the injection context, which
    # presumably supplies ApplicationGraph to injected methods - confirm
    with (injection_context(context)):
        # Partition the application graph and allocate routing keys
        delay_support_adder(app_graph)
        machine_graph, _ = spynnaker_splitter_partitioner(
            app_graph, machine, 100)
        allocator = ZonedRoutingInfoAllocator()
        n_keys_map = edge_to_n_keys_mapper(machine_graph)
        routing_info = allocator.__call__(
            machine_graph, n_keys_map, flexible=False)
        post_vertex = next(iter(post_pop._vertex.machine_vertices))
        post_vertex_slice = post_vertex.vertex_slice
        post_vertex_placement = Placement(post_vertex, 0, 0, 3)
        # Write the synaptic data to a temporary spec file
        temp_spec = tempfile.mktemp()
        spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)
        synaptic_matrices = SynapticMatrices(
            post_vertex_slice, n_synapse_types=2, all_single_syn_sz=10000,
            synaptic_matrix_region=1, direct_matrix_region=2,
            poptable_region=3, connection_builder_region=4)
        synaptic_matrices.write_synaptic_data(
            spec, post_pop._vertex.incoming_projections,
            all_syn_block_sz=10000, weight_scales=[32, 32],
            routing_info=routing_info)
        spec.end_specification()
        # Execute the spec and splice header, pointer table and regions
        # into one contiguous buffer for the mock transceiver
        with io.FileIO(temp_spec, "rb") as spec_reader:
            executor = DataSpecificationExecutor(spec_reader, 20000)
            executor.execute()
        all_data = bytearray()
        all_data.extend(bytearray(executor.get_header()))
        all_data.extend(bytearray(executor.get_pointer_table(0)))
        for r in range(MAX_MEM_REGIONS):
            region = executor.get_region(r)
            if region is not None:
                all_data.extend(region.region_data)
        transceiver = MockTransceiverRawData(all_data)
        report_folder = mkdtemp()
        try:
            connections_1 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_one_to_one_1._projection_edge,
                    proj_one_to_one_1._synapse_information))
            # Check that all the connections have the right weight and delay
            assert len(connections_1) == post_vertex_slice.n_atoms
            assert all([conn["weight"] == 1.5 for conn in connections_1])
            assert all([conn["delay"] == 1.0 for conn in connections_1])
            connections_2 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_one_to_one_2._projection_edge,
                    proj_one_to_one_2._synapse_information))
            # Check that all the connections have the right weight and delay
            assert len(connections_2) == post_vertex_slice.n_atoms
            assert all([conn["weight"] == 2.5 for conn in connections_2])
            assert all([conn["delay"] == 2.0 for conn in connections_2])
            connections_3 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_all_to_all._projection_edge,
                    proj_all_to_all._synapse_information))
            # Check that all the connections have the right weight and delay
            assert len(connections_3) == 100
            assert all([conn["weight"] == 4.5 for conn in connections_3])
            assert all([conn["delay"] == 4.0 for conn in connections_3])
            connections_4 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_from_list._projection_edge,
                    proj_from_list._synapse_information))
            # Check that all the connections have the right weight and delay
            assert len(connections_4) == len(from_list_list)
            list_weights = [values[2] for values in from_list_list]
            list_delays = [values[3] for values in from_list_list]
            assert all(list_weights == connections_4["weight"])
            assert all(list_delays == connections_4["delay"])
        finally:
            shutil.rmtree(report_folder, ignore_errors=True)
def test_pop_based_master_pop_table_standard(undelayed_indices_connected,
                                             delayed_indices_connected,
                                             n_pre_neurons,
                                             neurons_per_core,
                                             expect_app_keys, max_delay):
    """ Write synaptic data for a parameterised mix of delayed and
        undelayed connections and check the master population table
        contains the expected number of entries and address-list slots.
    """
    unittest_setup()
    machine = virtual_machine(12, 12)

    # Build a from list connector with the delays we want
    connections = []
    connections.extend([(i * neurons_per_core + j, j, 0, 10)
                        for i in undelayed_indices_connected
                        for j in range(100)])
    connections.extend([(i * neurons_per_core + j, j, 0, max_delay)
                        for i in delayed_indices_connected
                        for j in range(100)])

    # Make simple source and target, where the source has 1000 atoms
    # split into 10 vertices (100 each) and the target has 100 atoms in
    # a single vertex
    p.setup(1.0)
    post_pop = p.Population(
        100, p.IF_curr_exp(), label="Post",
        additional_parameters={
            "splitter": SplitterAbstractPopulationVertexSlice()})
    p.IF_curr_exp.set_model_max_atoms_per_core(neurons_per_core)
    pre_pop = p.Population(
        n_pre_neurons, p.IF_curr_exp(), label="Pre",
        additional_parameters={
            "splitter": SplitterAbstractPopulationVertexSlice()})
    p.Projection(
        pre_pop, post_pop, p.FromListConnector(connections),
        p.StaticSynapse())

    app_graph = globals_variables.get_simulator().original_application_graph
    context = {"ApplicationGraph": app_graph}
    # NOTE(review): the rest runs under the injection context, which
    # presumably supplies ApplicationGraph to injected methods - confirm
    with (injection_context(context)):
        # Partition the application graph and allocate routing keys
        delay_support_adder(app_graph)
        machine_graph, _ = spynnaker_splitter_partitioner(
            app_graph, machine, 100)
        allocator = ZonedRoutingInfoAllocator()
        n_keys_map = edge_to_n_keys_mapper(machine_graph)
        routing_info = allocator.__call__(
            machine_graph, n_keys_map, flexible=False)
        post_mac_vertex = next(iter(post_pop._vertex.machine_vertices))
        post_vertex_slice = post_mac_vertex.vertex_slice

        # Generate the data
        temp_spec = tempfile.mktemp()
        spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)
        synaptic_matrices = SynapticMatrices(
            post_vertex_slice, n_synapse_types=2, all_single_syn_sz=10000,
            synaptic_matrix_region=1, direct_matrix_region=2,
            poptable_region=3, connection_builder_region=4)
        synaptic_matrices.write_synaptic_data(
            spec, post_pop._vertex.incoming_projections,
            all_syn_block_sz=1000000, weight_scales=[32, 32],
            routing_info=routing_info)
        with io.FileIO(temp_spec, "rb") as spec_reader:
            executor = DataSpecificationExecutor(
                spec_reader, SDRAM.max_sdram_found)
            executor.execute()

        # Read the population table and check entries
        # (region 3 is the poptable region; first two words are counts)
        region = executor.get_region(3)
        mpop_data = numpy.frombuffer(
            region.region_data, dtype="uint8").view("uint32")
        n_entries = mpop_data[0]
        n_addresses = mpop_data[1]

        # Compute how many entries and addresses there should be
        expected_n_entries = 0
        expected_n_addresses = 0
        if expect_app_keys:
            # Always one for undelayed, maybe one for delayed if present
            n_app_entries = 1 + int(bool(delayed_indices_connected))
            expected_n_entries += n_app_entries
            # 2 address list entries for each entry, as there is also
            # extra_info
            expected_n_addresses += 2 * n_app_entries

        # If both delayed and undelayed, there is an entry for each incoming
        # machine edge
        elif delayed_indices_connected and undelayed_indices_connected:
            all_connected = set(undelayed_indices_connected)
            all_connected.update(delayed_indices_connected)
            expected_n_entries += len(all_connected)
            expected_n_addresses += len(all_connected)

        # If there are only undelayed indices, there is an entry for each
        elif undelayed_indices_connected:
            expected_n_entries += len(undelayed_indices_connected)
            expected_n_addresses += len(undelayed_indices_connected)

        # If there are only delayed indices, there are two entries for each
        # because the undelayed ones are still connected
        else:
            expected_n_entries += 2 * len(delayed_indices_connected)
            expected_n_addresses += 2 * len(delayed_indices_connected)

        assert (n_entries == expected_n_entries)
        assert (n_addresses == expected_n_addresses)
def test_write_data_spec():
    """ Build a two-population network with four projections, write its
        synaptic data to a spec, execute the spec, and check that the
        connections read back from a mock transceiver match what was
        written.

    Cleanup performed: a large block of commented-out dead scaffolding
    (mock populations, vertices and synapse information) has been
    removed, and explicit ``.__call__()`` invocations replaced with
    direct calls.
    """
    unittest_setup()
    # UGLY but the mock transceiver NEED generate_on_machine to be False
    AbstractGenerateConnectorOnMachine.generate_on_machine = say_false
    machine = virtual_machine(2, 2)
    p.setup(1.0)
    load_config()
    p.set_number_of_neurons_per_core(p.IF_curr_exp, 100)
    pre_pop = p.Population(
        10, p.IF_curr_exp(), label="Pre", additional_parameters={
            "splitter": SplitterAbstractPopulationVertexSlice()})
    post_pop = p.Population(
        10, p.IF_curr_exp(), label="Post", additional_parameters={
            "splitter": SplitterAbstractPopulationVertexSlice()})
    # Four projections with distinct weights/delays so each synaptic
    # matrix can be identified when read back
    proj_one_to_one_1 = p.Projection(
        pre_pop, post_pop, p.OneToOneConnector(),
        p.StaticSynapse(weight=1.5, delay=1.0))
    proj_one_to_one_2 = p.Projection(
        pre_pop, post_pop, p.OneToOneConnector(),
        p.StaticSynapse(weight=2.5, delay=2.0))
    proj_all_to_all = p.Projection(
        pre_pop, post_pop, p.AllToAllConnector(allow_self_connections=False),
        p.StaticSynapse(weight=4.5, delay=4.0))
    from_list_list = [(i, i, i, (i * 5) + 1) for i in range(10)]
    proj_from_list = p.Projection(
        pre_pop, post_pop, p.FromListConnector(from_list_list),
        p.StaticSynapse())

    app_graph = globals_variables.get_simulator().original_application_graph
    context = {
        "ApplicationGraph": app_graph
    }
    with injection_context(context):
        # Partition the application graph and allocate routing keys
        DelaySupportAdder()(app_graph)
        machine_graph, _ = SpynnakerSplitterPartitioner()(
            app_graph, machine, 100)
        n_keys_map = EdgeToNKeysMapper()(machine_graph)
        routing_info = ZonedRoutingInfoAllocator()(
            machine_graph, n_keys_map, flexible=False)

        post_vertex = next(iter(post_pop._vertex.machine_vertices))
        post_vertex_slice = post_vertex.vertex_slice
        post_vertex_placement = Placement(post_vertex, 0, 0, 3)

        # Write the synaptic data to a temporary spec file
        temp_spec = tempfile.mktemp()
        spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)
        synaptic_matrices = SynapticMatrices(
            post_vertex_slice, n_synapse_types=2, all_single_syn_sz=10000,
            synaptic_matrix_region=1, direct_matrix_region=2,
            poptable_region=3, connection_builder_region=4)
        synaptic_matrices.write_synaptic_data(
            spec, post_pop._vertex.incoming_projections,
            all_syn_block_sz=10000, weight_scales=[32, 32],
            routing_info=routing_info)
        spec.end_specification()

        # Execute the spec and splice header, pointer table and regions
        # into one contiguous buffer for the mock transceiver
        with io.FileIO(temp_spec, "rb") as spec_reader:
            executor = DataSpecificationExecutor(spec_reader, 20000)
            executor.execute()
        all_data = bytearray()
        all_data.extend(bytearray(executor.get_header()))
        all_data.extend(bytearray(executor.get_pointer_table(0)))
        for r in range(MAX_MEM_REGIONS):
            region = executor.get_region(r)
            if region is not None:
                all_data.extend(region.region_data)
        transceiver = MockTransceiverRawData(all_data)
        report_folder = mkdtemp()
        try:
            connections_1 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_one_to_one_1._projection_edge,
                    proj_one_to_one_1._synapse_information))

            # Check that all the connections have the right weight and delay
            assert len(connections_1) == post_vertex_slice.n_atoms
            assert all([conn["weight"] == 1.5 for conn in connections_1])
            assert all([conn["delay"] == 1.0 for conn in connections_1])

            connections_2 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_one_to_one_2._projection_edge,
                    proj_one_to_one_2._synapse_information))

            # Check that all the connections have the right weight and delay
            assert len(connections_2) == post_vertex_slice.n_atoms
            assert all([conn["weight"] == 2.5 for conn in connections_2])
            assert all([conn["delay"] == 2.0 for conn in connections_2])

            connections_3 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_all_to_all._projection_edge,
                    proj_all_to_all._synapse_information))

            # Check that all the connections have the right weight and delay
            assert len(connections_3) == 100
            assert all([conn["weight"] == 4.5 for conn in connections_3])
            assert all([conn["delay"] == 4.0 for conn in connections_3])

            connections_4 = numpy.concatenate(
                synaptic_matrices.get_connections_from_machine(
                    transceiver, post_vertex_placement,
                    proj_from_list._projection_edge,
                    proj_from_list._synapse_information))

            # Check that all the connections have the right weight and delay
            assert len(connections_4) == len(from_list_list)
            list_weights = [values[2] for values in from_list_list]
            list_delays = [values[3] for values in from_list_list]
            assert all(list_weights == connections_4["weight"])
            assert all(list_delays == connections_4["delay"])
        finally:
            shutil.rmtree(report_folder, ignore_errors=True)