def create_app_less():
    app_graph = ApplicationGraph("Test")
    mac_graph = MachineGraph("Test", app_graph)
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # An output vertex to aim things at (to make keys required)
    out_mac_vertex = SimpleMacVertex()
    mac_graph.add_vertex(out_mac_vertex)

    # Loop over 5 notional application vertices (3 bits)
    for app_index in range(5):

        # For each, create up to (5 x 4) + 1 = 21 machine vertices (5 bits)
        for mac_index in range((app_index * 4) + 1):
            mac_vertex = SimpleMacVertex()
            mac_graph.add_vertex(mac_vertex)

            # For each machine vertex create up to
            # (20 x 2) + 1 = 41(!) partitions (6 bits)
            for mac_edge_index in range((mac_index * 2) + 1):
                mac_edge = MachineEdge(mac_vertex, out_mac_vertex)
                part_name = "Part{}".format(mac_edge_index)
                mac_graph.add_edge(mac_edge, part_name)

                # Give the partition up to (40 x 4) + 1 = 161 keys (8 bits)
                p = mac_graph.get_outgoing_edge_partition_starting_at_vertex(
                    mac_vertex, part_name)
                n_keys_map.set_n_keys_for_partition(
                    p, (mac_edge_index * 4) + 1)
    return app_graph, mac_graph, n_keys_map
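
# --- Illustrative helper (not part of the original tests) -----------------
# A minimal sketch of how a test might total up the key space requested by
# one of the fixtures above.  It assumes the usual PACMAN accessors
# MachineGraph.outgoing_edge_partitions and
# DictBasedMachinePartitionNKeysMap.n_keys_for_partition(); adjust the names
# if the installed PACMAN version differs.
def _total_keys_requested(mac_graph, n_keys_map):
    # Sum the number of keys asked for across every outgoing partition
    return sum(
        n_keys_map.n_keys_for_partition(partition)
        for partition in mac_graph.outgoing_edge_partitions)


# Hypothetical use:
#     app_graph, mac_graph, n_keys_map = create_app_less()
#     assert _total_keys_requested(mac_graph, n_keys_map) > 0
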
def create_graphs_only_fixed():
    app_graph = ApplicationGraph("Test")

    # An output vertex to aim things at (to make keys required)
    out_app_vertex = SimpleAppVertex()
    app_graph.add_vertex(out_app_vertex)

    # A single sending application vertex
    app_vertex = SimpleAppVertex()
    app_graph.add_vertex(app_vertex)

    mac_graph = MachineGraph("Test", app_graph)
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # An output vertex to aim things at (to make keys required)
    out_mac_vertex = SimpleMacVertex(app_vertex=out_app_vertex)
    mac_graph.add_vertex(out_mac_vertex)

    mac_vertex = SimpleMacVertex(app_vertex=app_vertex)
    mac_graph.add_vertex(mac_vertex)

    for mac_edge_index in range(2):
        mac_edge = MachineEdge(mac_vertex, out_mac_vertex)
        part_name = "Part{}".format(mac_edge_index)
        mac_graph.add_edge(mac_edge, part_name)
        p = mac_graph.get_outgoing_edge_partition_starting_at_vertex(
            mac_vertex, part_name)
        if mac_edge_index == 0:
            p.add_constraint(FixedKeyAndMaskConstraint(
                [BaseKeyAndMask(0x4c00000, 0xFFFFFFFE)]))
        if mac_edge_index == 1:
            p.add_constraint(FixedKeyAndMaskConstraint(
                [BaseKeyAndMask(0x4c00000, 0xFFFFFFFF)]))
        n_keys_map.set_n_keys_for_partition(
            p, (mac_edge_index * 4) + 1)

    return app_graph, mac_graph, n_keys_map
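
# --- Illustrative helper (not part of the original tests) -----------------
# The two fixed constraints above differ only in their masks: 0xFFFFFFFE
# leaves one free bit (2 usable keys) and 0xFFFFFFFF leaves none (1 key).
# A small sketch of that arithmetic, assuming 32-bit keys:
def _n_keys_available(mask):
    # Each zero bit in the mask doubles the number of addressable keys
    free_bits = bin(~mask & 0xFFFFFFFF).count("1")
    return 2 ** free_bits


# _n_keys_available(0xFFFFFFFE) == 2
# _n_keys_available(0xFFFFFFFF) == 1
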
def test_create_constraints_to_file(tmpdir):
    # Construct the sample machine and graph
    machine = VirtualMachine(version=3, with_wrap_arounds=None)
    # TODO: define some extra monitor cores (how?)
    graph = MachineGraph("foo")
    tag1 = IPtagResource("1.2.3.4", 5, False, tag="footag")
    tag2 = ReverseIPtagResource(tag="bartag")
    v0 = SimpleMachineVertex(
        ResourceContainer(iptags=[tag1], reverse_iptags=[tag2]),
        constraints=[ChipAndCoreConstraint(1, 1, 3)])
    graph.add_vertex(v0)
    v0_id = ident(v0)
    v1 = MachineSpiNNakerLinkVertex(
        2, constraints=[ChipAndCoreConstraint(1, 1)])
    v1.set_virtual_chip_coordinates(0, 2)
    graph.add_vertex(v1)
    v1_id = ident(v1)

    algo = CreateConstraintsToFile()
    fn = tmpdir.join("foo.json")
    filename, mapping = algo(graph, machine, str(fn))
    assert filename == str(fn)
    for vid in mapping:
        assert vid in [v0_id, v1_id]
        assert vid == ident(mapping[vid])
    obj = json.loads(fn.read())
    baseline = [{
        "type": "reserve_resource",
        "location": None,
        "reservation": [0, 1],
        "resource": "cores"
    }, {
        "type": "location",
        "location": [1, 1],
        "vertex": v0_id
    }, {
        "type": "resource",
        "resource": "cores",
        "range": [3, 4],
        "vertex": v0_id
    }, {
        "type": "resource",
        "resource": "iptag",
        "range": [0, 1],
        "vertex": v0_id
    }, {
        "type": "resource",
        "resource": "reverse_iptag",
        "range": [0, 1],
        "vertex": v0_id
    }, {
        "type": "route_endpoint",
        "direction": "south",
        "vertex": v1_id
    }, {
        "type": "location",
        "location": [1, 0],
        "vertex": v1_id
    }]
    assert obj == baseline
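
# --- Note on helpers used above (assumption, not from this excerpt) --------
# ident() and md5() are not defined in this excerpt.  They are assumed to be
# small module-level helpers along these lines: ident() returning a stable
# string id for an object, and md5() hexdigesting a string (as used for the
# synthetic "tag" entries in the JSON baselines in this file).
#
#     def ident(obj):
#         return str(id(obj))
#
#     def md5(text):
#         return hashlib.md5(text.encode()).hexdigest()
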
def test_memory_io():
    vertex = MyVertex()
    graph = MachineGraph("Test")
    graph.add_vertex(vertex)
    placements = Placements()
    placements.add_placement(Placement(vertex, 0, 0, 1))
    transceiver = _MockTransceiver()
    temp = tempfile.mkdtemp()
    print("ApplicationDataFolder = {}".format(temp))
    inputs = {
        "MemoryTransceiver": transceiver,
        "MemoryMachineGraph": graph,
        "MemoryPlacements": placements,
        "IPAddress": "testing",
        "ApplicationDataFolder": temp,
        "APPID": 30
    }
    algorithms = ["WriteMemoryIOData"]
    executor = PACMANAlgorithmExecutor(
        algorithms, [], inputs, [], [], [],
        xml_paths=get_front_end_common_pacman_xml_paths())
    executor.execute_mapping()
    assert vertex._test_tag == vertex._tag
def create_graphs_no_edge():
    app_graph = ApplicationGraph("Test")

    # An output vertex to aim things at (to make keys required)
    out_app_vertex = SimpleAppVertex()
    app_graph.add_vertex(out_app_vertex)

    # A single sending application vertex
    app_vertex = SimpleAppVertex()
    app_graph.add_vertex(app_vertex)

    mac_graph = MachineGraph("Test", app_graph)
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # An output vertex to aim things at (to make keys required)
    out_mac_vertex = SimpleMacVertex(app_vertex=out_app_vertex)
    mac_graph.add_vertex(out_mac_vertex)

    mac_vertex = SimpleMacVertex(app_vertex=app_vertex)
    mac_graph.add_vertex(mac_vertex)

    return app_graph, mac_graph, n_keys_map
def test_write_synaptic_matrix_and_master_population_table(self):
    MockSimulator.setup()

    default_config_paths = os.path.join(
        os.path.dirname(abstract_spinnaker_common.__file__),
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME)
    config = conf_loader.load_config(
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)
    config.set("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)

    machine_time_step = 1000.0

    pre_app_vertex = SimpleApplicationVertex(10)
    pre_vertex = SimpleMachineVertex(resources=None)
    pre_vertex_slice = Slice(0, 9)
    post_app_vertex = SimpleApplicationVertex(10)
    post_vertex = SimpleMachineVertex(resources=None)
    post_vertex_slice = Slice(0, 9)
    post_slice_index = 0

    one_to_one_connector_1 = OneToOneConnector(None)
    one_to_one_connector_1.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    one_to_one_connector_1.set_weights_and_delays(1.5, 1.0)
    one_to_one_connector_2 = OneToOneConnector(None)
    one_to_one_connector_2.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    one_to_one_connector_2.set_weights_and_delays(2.5, 2.0)
    all_to_all_connector = AllToAllConnector(None)
    all_to_all_connector.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    all_to_all_connector.set_weights_and_delays(4.5, 4.0)

    direct_synapse_information_1 = SynapseInformation(
        one_to_one_connector_1, SynapseDynamicsStatic(), 0)
    direct_synapse_information_2 = SynapseInformation(
        one_to_one_connector_2, SynapseDynamicsStatic(), 1)
    all_to_all_synapse_information = SynapseInformation(
        all_to_all_connector, SynapseDynamicsStatic(), 0)

    app_edge = ProjectionApplicationEdge(
        pre_app_vertex, post_app_vertex, direct_synapse_information_1)
    app_edge.add_synapse_information(direct_synapse_information_2)
    app_edge.add_synapse_information(all_to_all_synapse_information)
    machine_edge = ProjectionMachineEdge(
        app_edge.synapse_information, pre_vertex, post_vertex)
    partition_name = "TestPartition"

    graph = MachineGraph("Test")
    graph.add_vertex(pre_vertex)
    graph.add_vertex(post_vertex)
    graph.add_edge(machine_edge, partition_name)

    graph_mapper = GraphMapper()
    graph_mapper.add_vertex_mapping(
        pre_vertex, pre_vertex_slice, pre_app_vertex)
    graph_mapper.add_vertex_mapping(
        post_vertex, post_vertex_slice, post_app_vertex)
    graph_mapper.add_edge_mapping(machine_edge, app_edge)

    weight_scales = [4096.0, 4096.0]

    key = 0
    routing_info = RoutingInfo()
    routing_info.add_partition_info(PartitionRoutingInfo(
        [BaseKeyAndMask(key, 0xFFFFFFF0)],
        graph.get_outgoing_edge_partition_starting_at_vertex(
            pre_vertex, partition_name)))

    temp_spec = tempfile.mktemp()
    spec_writer = FileDataWriter(temp_spec)
    spec = DataSpecificationGenerator(spec_writer, None)
    master_pop_sz = 1000
    master_pop_region = 0
    all_syn_block_sz = 2000
    synapse_region = 1
    spec.reserve_memory_region(master_pop_region, master_pop_sz)
    spec.reserve_memory_region(synapse_region, all_syn_block_sz)

    synapse_type = MockSynapseType()

    synaptic_manager = SynapticManager(
        synapse_type=synapse_type, ring_buffer_sigma=5.0,
        spikes_per_second=100.0, config=config)
    synaptic_manager._write_synaptic_matrix_and_master_population_table(
        spec, [post_vertex_slice], post_slice_index, post_vertex,
        post_vertex_slice, all_syn_block_sz, weight_scales,
        master_pop_region, synapse_region, routing_info, graph_mapper,
        graph, machine_time_step)
    spec.end_specification()
    spec_writer.close()

    spec_reader = FileDataReader(temp_spec)
    executor = DataSpecificationExecutor(
        spec_reader, master_pop_sz + all_syn_block_sz)
    executor.execute()
    master_pop_table = executor.get_region(0)
    synaptic_matrix = executor.get_region(1)

    all_data = bytearray()
    all_data.extend(master_pop_table.region_data[
        :master_pop_table.max_write_pointer])
    all_data.extend(synaptic_matrix.region_data[
        :synaptic_matrix.max_write_pointer])
    master_pop_table_address = 0
    synaptic_matrix_address = master_pop_table.max_write_pointer
    direct_synapses_address = struct.unpack_from(
        "<I", synaptic_matrix.region_data)[0]
    direct_synapses_address += synaptic_matrix_address + 8
    indirect_synapses_address = synaptic_matrix_address + 4
    placement = Placement(None, 0, 0, 1)
    transceiver = MockTransceiverRawData(all_data)

    # Get the master population table details
    items = synaptic_manager._poptable_type\
        .extract_synaptic_matrix_data_location(
            key, master_pop_table_address, transceiver,
            placement.x, placement.y)

    # The first entry should be direct, but the rest should be indirect;
    # the second is potentially direct, but has been excluded by the limit
    # on the size of the direct matrix
    assert len(items) == 3

    # TODO: This has been changed because direct matrices are disabled!
    assert not items[0][2]
    assert not items[1][2]
    assert not items[2][2]

    data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address,
        key=key, n_rows=pre_vertex_slice.n_atoms, index=0,
        using_extra_monitor_cores=False)
    connections_1 = synaptic_manager._synapse_io.read_synapses(
        direct_synapse_information_1, pre_vertex_slice,
        post_vertex_slice, row_len_1, 0, 2, weight_scales, data_1, None,
        app_edge.n_delay_stages, machine_time_step)

    # The first matrix is a 1-1 matrix, so row length is 1
    assert row_len_1 == 1

    # Check that all the connections have the right weight and delay
    assert len(connections_1) == post_vertex_slice.n_atoms
    assert all([conn["weight"] == 1.5 for conn in connections_1])
    assert all([conn["delay"] == 1.0 for conn in connections_1])

    data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address,
        key=key, n_rows=pre_vertex_slice.n_atoms, index=1,
        using_extra_monitor_cores=False)
    connections_2 = synaptic_manager._synapse_io.read_synapses(
        direct_synapse_information_2, pre_vertex_slice,
        post_vertex_slice, row_len_2, 0, 2, weight_scales, data_2, None,
        app_edge.n_delay_stages, machine_time_step)

    # The second matrix is a 1-1 matrix, so row length is 1
    assert row_len_2 == 1

    # Check that all the connections have the right weight and delay
    assert len(connections_2) == post_vertex_slice.n_atoms
    assert all([conn["weight"] == 2.5 for conn in connections_2])
    assert all([conn["delay"] == 2.0 for conn in connections_2])

    data_3, row_len_3 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address,
        key=key, n_rows=pre_vertex_slice.n_atoms, index=2,
        using_extra_monitor_cores=False)
    connections_3 = synaptic_manager._synapse_io.read_synapses(
        all_to_all_synapse_information, pre_vertex_slice,
        post_vertex_slice, row_len_3, 0, 2, weight_scales, data_3, None,
        app_edge.n_delay_stages, machine_time_step)

    # The third matrix is an all-to-all matrix, so row length is n_atoms
    assert row_len_3 == post_vertex_slice.n_atoms

    # Check that all the connections have the right weight and delay
    assert len(connections_3) == \
        post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
    assert all([conn["weight"] == 4.5 for conn in connections_3])
    assert all([conn["delay"] == 4.0 for conn in connections_3])
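
# --- Illustrative note (inferred from the assertions above; not original) --
# read_synapses() returns a structured (record-style) array whose entries
# expose at least "weight" and "delay" fields, which is why the checks above
# can index connections by field name, e.g.:
#
#     weights = [conn["weight"] for conn in connections_3]
#     assert min(weights) == max(weights) == 4.5
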
def create_graphs1(with_fixed):
    app_graph = ApplicationGraph("Test")

    # An output vertex to aim things at (to make keys required)
    out_app_vertex = SimpleAppVertex()
    app_graph.add_vertex(out_app_vertex)

    # Create 5 application vertices (3 bits)
    app_vertices = list()
    for app_index in range(5):
        app_vertices.append(SimpleAppVertex())
    app_graph.add_vertices(app_vertices)

    mac_graph = MachineGraph("Test", app_graph)
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # An output vertex to aim things at (to make keys required)
    out_mac_vertex = SimpleMacVertex(app_vertex=out_app_vertex)
    mac_graph.add_vertex(out_mac_vertex)

    # Create machine vertices for each of the 5 application vertices
    for app_index, app_vertex in enumerate(app_vertices):

        # For each, create up to (5 x 4) + 1 = 21 machine vertices (5 bits)
        for mac_index in range((app_index * 4) + 1):
            mac_vertex = SimpleMacVertex(app_vertex=app_vertex)
            mac_graph.add_vertex(mac_vertex)

            # For each machine vertex create up to
            # (20 x 2) + 1 = 41(!) partitions (6 bits)
            for mac_edge_index in range((mac_index * 2) + 1):
                mac_edge = MachineEdge(mac_vertex, out_mac_vertex)
                part_name = "Part{}".format(mac_edge_index)
                mac_graph.add_edge(mac_edge, part_name)

                # Give the partition up to (40 x 4) + 1 = 161 keys (8 bits)
                p = mac_graph.get_outgoing_edge_partition_starting_at_vertex(
                    mac_vertex, part_name)
                if with_fixed:
                    if (app_index == 2 and mac_index == 4 and
                            part_name == "Part7"):
                        p.add_constraint(FixedKeyAndMaskConstraint(
                            [BaseKeyAndMask(0xFE00000, 0xFFFFFFC0)]))
                    if (app_index == 2 and mac_index == 0 and
                            part_name == "Part1"):
                        p.add_constraint(FixedKeyAndMaskConstraint(
                            [BaseKeyAndMask(0x4c00000, 0xFFFFFFFE)]))
                    if (app_index == 2 and mac_index == 0 and
                            part_name == "Part1"):
                        p.add_constraint(FixedKeyAndMaskConstraint(
                            [BaseKeyAndMask(0x4c00000, 0xFFFFFFFF)]))
                    if (app_index == 3 and mac_index == 0 and
                            part_name == "Part1"):
                        p.add_constraint(FixedKeyAndMaskConstraint(
                            [BaseKeyAndMask(0x3300000, 0xFFFFFFFF)]))
                    if (app_index == 3 and mac_index == 0 and
                            part_name == "Part1"):
                        p.add_constraint(FixedKeyAndMaskConstraint(
                            [BaseKeyAndMask(0x3300001, 0)]))
                n_keys_map.set_n_keys_for_partition(
                    p, (mac_edge_index * 4) + 1)
    return app_graph, mac_graph, n_keys_map
def create_big(with_fixed):
    # This test shows how easy it is to trip up the allocator with a retina
    app_graph = ApplicationGraph("Test")

    # Create a single "big" vertex
    big_app_vertex = SimpleAppVertex(label="Retina")
    app_graph.add_vertex(big_app_vertex)

    # Create a single output vertex (which won't send)
    out_app_vertex = SimpleAppVertex(label="Destination")
    app_graph.add_vertex(out_app_vertex)

    # Create a "middle" vertex (which will have a load of machine vertices)
    mid_app_vertex = SimpleAppVertex("Population")
    app_graph.add_vertex(mid_app_vertex)

    mac_graph = MachineGraph("Test", app_graph)
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # Create a single big machine vertex
    big_mac_vertex = SimpleMacVertex(
        app_vertex=big_app_vertex, label="RETINA")
    mac_graph.add_vertex(big_mac_vertex)

    # Create a single output vertex (which won't send)
    out_mac_vertex = SimpleMacVertex(app_vertex=out_app_vertex)
    mac_graph.add_vertex(out_mac_vertex)

    # Create a load of middle vertices and connect them up
    for _ in range(2000):  # 2000 needs 11 bits
        mid_mac_vertex = SimpleMacVertex(app_vertex=mid_app_vertex)
        mac_graph.add_vertex(mid_mac_vertex)
        edge = MachineEdge(big_mac_vertex, mid_mac_vertex)
        mac_graph.add_edge(edge, "Test")
        edge_2 = MachineEdge(mid_mac_vertex, out_mac_vertex)
        mac_graph.add_edge(edge_2, "Test")
        mid_part = mac_graph.get_outgoing_edge_partition_starting_at_vertex(
            mid_mac_vertex, "Test")
        n_keys_map.set_n_keys_for_partition(mid_part, 100)

    big_mac_part = mac_graph.get_outgoing_edge_partition_starting_at_vertex(
        big_mac_vertex, "Test")
    if with_fixed:
        big_mac_part.add_constraint(FixedKeyAndMaskConstraint([
            BaseKeyAndMask(0x0, 0x180000)]))
    # Make the "retina" need 21 bits, so the total is now 21 + 11 = 32 bits,
    # but the application vertices need some bits too
    n_keys_map.set_n_keys_for_partition(big_mac_part, 1024 * 768 * 2)
    return app_graph, mac_graph, n_keys_map
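
# --- Illustrative check (not part of the original tests) -------------------
# A quick sanity check of the bit-budget arithmetic in the comment above:
# indexing N values needs (N - 1).bit_length() bits, so the "retina" keys
# need 21 bits and the 2000 middle vertices need 11 bits, i.e. 32 in total.
def _check_big_bit_budget():
    assert (1024 * 768 * 2 - 1).bit_length() == 21
    assert (2000 - 1).bit_length() == 11
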
def test_convert_to_file_machine_graph_pure_multicast(tmpdir):
    # Construct the sample graph
    graph = MachineGraph("foo")
    v0 = SimpleMachineVertex(ResourceContainer())
    graph.add_vertex(v0)
    tag = IPtagResource("1.2.3.4", 5, False, tag="footag")
    v1 = SimpleMachineVertex(ResourceContainer(iptags=[tag]))
    graph.add_vertex(v1)
    t1id = md5("%s_tag" % ident(v1))
    tag = ReverseIPtagResource(tag="bartag")
    v2 = SimpleMachineVertex(ResourceContainer(reverse_iptags=[tag]))
    graph.add_vertex(v2)
    t2id = md5("%s_tag" % ident(v2))
    graph.add_edge(MachineEdge(v1, v0), "part1")
    p1 = graph.get_outgoing_edge_partition_starting_at_vertex(v1, "part1")
    graph.add_edge(MachineEdge(v0, v2, label="foobar"), "part2")
    p2 = graph.get_outgoing_edge_partition_starting_at_vertex(v0, "part2")

    # Convert it to JSON
    algo = ConvertToFileMachineGraphPureMulticast()
    fn = tmpdir.join("foo.json")
    filename, _vertex_by_id, _partition_by_id = algo(graph, str(fn))
    assert filename == str(fn)

    # Rebuild and compare; simplest way of checking given that order is not
    # preserved in the underlying string and altering that is hard
    obj = json.loads(fn.read())
    baseline = {
        "vertices_resources": {
            ident(v0): {"cores": 1, "sdram": 0},
            ident(v1): {"cores": 1, "sdram": 0},
            t1id: {"cores": 0, "sdram": 0},
            ident(v2): {"cores": 1, "sdram": 0},
            t2id: {"cores": 0, "sdram": 0}
        },
        "edges": {
            ident(p1): {
                "source": ident(v1), "sinks": [ident(v0)],
                "type": "multicast", "weight": 1
            },
            ident(p2): {
                "source": ident(v0), "sinks": [ident(v2)],
                "type": "multicast", "weight": 1
            },
            t1id: {
                "source": ident(v1), "sinks": [t1id],
                "weight": 1.0, "type": "FAKE_TAG_EDGE"
            },
            t2id: {
                "source": ident(v2), "sinks": [t2id],
                "weight": 1.0, "type": "FAKE_TAG_EDGE"
            }
        }
    }
    assert obj == baseline