def _place_vertex(self, vertex, resource_tracker, machine, placements):
    """ Place a single vertex, honouring any radial placement constraint.

    :param vertex: the vertex to place
    :param resource_tracker: tracker of free resources on the machine
    :param machine: the machine being placed onto
    :param placements: the placements object to add the new placement to
    :raise PacmanPlaceException: \
        if the vertex carries conflicting radial placement constraints
    """
    radial = utility_calls.locate_constraints_of_type(
        [vertex], PlacerRadialPlacementFromChipConstraint)

    # Every radial constraint on the vertex must name the same start chip
    start_x, start_y = None, None
    for constraint in radial:
        if start_x is None:
            start_x = constraint.x
        elif start_x != constraint.x:
            raise PacmanPlaceException("Non-matching constraints")
        if start_y is None:
            start_y = constraint.y
        elif start_y != constraint.y:
            raise PacmanPlaceException("Non-matching constraints")

    # With a start chip, restrict the search to chips in radial order
    chips = None
    if start_x is not None and start_y is not None:
        chips = self._generate_radial_chips(
            machine, resource_tracker, start_x, start_y)

    # Allocate a core satisfying the vertex's constraints and record it
    (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
        vertex.resources_required, vertex.constraints, chips)
    placements.add_placement(Placement(vertex, x, y, p))
def convert_from_rig_placements(
        rig_placements, rig_allocations, partitioned_graph):
    """ Build a Placements object from rig placement and allocation data.

    :param rig_placements: mapping from vertex id to (x, y) chip coordinates
    :param rig_allocations: mapping from vertex id to core allocations
    :param partitioned_graph: graph used to resolve vertex ids to vertices
    :return: the converted placements
    """
    result = Placements()
    for vertex_id, coords in rig_placements.items():
        subvertex = partitioned_graph.get_subvertex_by_id(vertex_id)
        if subvertex is None:
            # id not known to the graph; nothing to place
            continue
        if isinstance(subvertex, VirtualPartitionedVertex):
            # Virtual vertices live on their virtual chip with no core
            result.add_placement(Placement(
                subvertex, subvertex.virtual_chip_x,
                subvertex.virtual_chip_y, None))
        else:
            chip_x, chip_y = coords
            core = rig_allocations[vertex_id]["cores"].start
            result.add_placement(Placement(subvertex, chip_x, chip_y, core))
    return result
def _allocate_individual(subvertex, placements, progress_bar, resource_tracker):
    """ Allocate a core for one subvertex anywhere on the machine and\
        record the resulting placement.

    :param subvertex: the subvertex to place
    :param placements: placements object to add the new placement to
    :param progress_bar: progress bar to tick once the placement is made
    :param resource_tracker: tracker of free machine resources
    """
    # Let the tracker pick any core that satisfies the constraints
    x, y, p, _, _ = resource_tracker.allocate_constrained_resources(
        subvertex.resources_required, subvertex.constraints)
    placements.add_placement(Placement(subvertex, x, y, p))
    progress_bar.update()
def _do_allocation(self, ordered_subverts, placements, machine):
    """ Allocate cores for lists of one-to-one connected subvertices,\
        placing each list on a single chip where possible.

    :param ordered_subverts: lists of subvertices; each inner list should\
        be placed together on one chip if it fits
    :param placements: placements object to add new placements to
    :param machine: the machine being placed onto
    """
    # Iterate over subvertices and generate placements
    progress_bar = ProgressBar(
        len(ordered_subverts), "Placing graph vertices")
    resource_tracker = ResourceTracker(
        machine, self._generate_radial_chips(machine))

    for subvertex_list in ordered_subverts:

        # if too many one to ones to fit on a chip, allocate individually
        if len(subvertex_list) > self.MAX_CORES_PER_CHIP_TO_CONSIDER:
            for subvertex in subvertex_list:
                self._allocate_individual(
                    subvertex, placements, progress_bar, resource_tracker)
        else:
            # merge constraints
            placement_constraint, ip_tag_constraints, \
                reverse_ip_tag_constraints = \
                self._merge_constraints(subvertex_list)

            # locate most cores on a chip
            max_size_on_a_chip = resource_tracker.\
                max_available_cores_on_chips_that_satisfy(
                    placement_constraint, ip_tag_constraints,
                    reverse_ip_tag_constraints)

            # BUG FIX: the original inverted this test — it attempted a
            # group allocation precisely when the group did NOT fit on
            # any single chip, and fell back to individual allocation
            # when it did fit.  If the group does not fit, allocate
            # individually; otherwise allocate the whole list as a block.
            if max_size_on_a_chip < len(subvertex_list):
                for subvertex in subvertex_list:
                    self._allocate_individual(
                        subvertex, placements, progress_bar,
                        resource_tracker)
            else:
                # collect resource requirements of the whole group
                resources = [
                    subvert.resources_required
                    for subvert in subvertex_list]

                # get cores for the whole group on one chip
                cores = resource_tracker.allocate_group(
                    resources, placement_constraint, ip_tag_constraints,
                    reverse_ip_tag_constraints)

                # allocate cores to subverts
                for subvert, (x, y, p, _, _) in zip(subvertex_list, cores):
                    placements.add_placement(Placement(subvert, x, y, p))
                    progress_bar.update()
    progress_bar.end()
def test_retrieve_synaptic_block(self):
    """ Check that _retrieve_synaptic_block caches the block it reads and\
        that clearing the connection cache forces a fresh read.
    """
    config_path = os.path.join(
        os.path.dirname(abstract_spinnaker_common.__file__),
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME)
    config = conf_loader.load_config(
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME, config_path)
    key = 0
    synaptic_manager = SynapticManager(
        synapse_type=None, ring_buffer_sigma=5.0,
        spikes_per_second=100.0, config=config,
        population_table_type=MockMasterPopulationTable(
            {key: [(1, 0, False)]}),
        synapse_io=MockSynapseIO())
    transceiver = MockTransceiverRawData(bytearray(16))
    placement = Placement(None, 0, 0, 1)

    def read_block():
        # All three retrievals in this test use identical arguments
        return synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=0, indirect_synapses_address=0,
            direct_synapses_address=0, key=key, n_rows=1, index=0,
            using_extra_monitor_cores=False)

    first_block, first_len = read_block()
    same_block, second_len = read_block()
    synaptic_manager.clear_connection_cache()
    different_block, third_len = read_block()

    # Check that the row lengths are all the same
    assert first_len == second_len
    assert first_len == third_len

    # Check that the block retrieved twice without reset is cached
    assert id(first_block) == id(same_block)

    # Check that the block after reset is not a copy
    assert id(first_block) != id(different_block)
def test_retrieve_direct_block(self):
    """ Check that direct synaptic blocks are read one word per row and\
        expanded to full synaptic rows.
    """
    config_path = os.path.join(
        os.path.dirname(abstract_spinnaker_common.__file__),
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME)
    config = conf_loader.load_config(
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME, config_path)
    key = 0
    n_rows = 2

    # Two direct matrices of two single-word rows each, and the rows
    # they are expected to expand into
    direct_matrix = bytearray(struct.pack("<IIII", 1, 2, 3, 4))
    expected_block_1 = bytearray(
        struct.pack("<IIIIIIII", 0, 1, 0, 1, 0, 1, 0, 2))
    expected_block_2 = bytearray(
        struct.pack("<IIIIIIII", 0, 1, 0, 3, 0, 1, 0, 4))

    synaptic_manager = SynapticManager(
        synapse_type=None, ring_buffer_sigma=5.0,
        spikes_per_second=100.0, config=config,
        population_table_type=MockMasterPopulationTable(
            {key: [(1, 0, True), (1, n_rows * 4, True)]}),
        synapse_io=MockSynapseIO())
    transceiver = MockTransceiverRawData(direct_matrix)
    placement = Placement(None, 0, 0, 1)

    def read_block(index):
        # Only the index differs between the two retrievals
        return synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=0, indirect_synapses_address=0,
            direct_synapses_address=0, key=key, n_rows=n_rows,
            index=index, using_extra_monitor_cores=False)

    data_1, row_len_1 = read_block(0)
    data_2, row_len_2 = read_block(1)

    # Row lengths should be 1
    assert row_len_1 == 1
    assert row_len_2 == 1

    # Check the data retrieved
    assert data_1 == expected_block_1
    assert data_2 == expected_block_2
def __call__(self, partitioned_graph, machine):
    """ Place a partitioned_graph so that each subvertex is placed on a\
        core

    :param partitioned_graph: The partitioned_graph to place
    :type partitioned_graph:\
        :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
    :return: A set of placements
    :rtype: :py:class:`pacman.model.placements.placements.Placements`
    :raise pacman.exceptions.PacmanPlaceException: If something\
        goes wrong with the placement
    """
    # check that the algorithm can handle the constraints
    utility_calls.check_algorithm_can_support_constraints(
        constrained_vertices=partitioned_graph.subvertices,
        supported_constraints=[PlacerChipAndCoreConstraint],
        abstract_constraint_type=AbstractPlacerConstraint)

    # most-constrained subvertices are placed first
    ordered_subverts = utility_calls.sort_objects_by_constraint_authority(
        partitioned_graph.subvertices)

    progress_bar = ProgressBar(
        len(ordered_subverts), "Placing graph vertices")
    tracker = ResourceTracker(machine)

    placements = Placements()
    for subvertex in ordered_subverts:
        # A new placement anywhere on the board that satisfies constraints
        x, y, p, _, _ = tracker.allocate_constrained_resources(
            subvertex.resources_required, subvertex.constraints)
        placements.add_placement(Placement(subvertex, x, y, p))
        progress_bar.update()
    progress_bar.end()
    return {'placements': placements}
def test_memory_io():
    """ Run the WriteMemoryIOData algorithm over a one-vertex graph with a\
        mock transceiver and check the vertex saw its allocated tag.
    """
    vertex = MyVertex()
    graph = MachineGraph("Test")
    graph.add_vertex(vertex)

    placements = Placements()
    placements.add_placement(Placement(vertex, 0, 0, 1))

    transceiver = _MockTransceiver()
    data_folder = tempfile.mkdtemp()
    print("ApplicationDataFolder = {}".format(data_folder))

    executor = PACMANAlgorithmExecutor(
        ["WriteMemoryIOData"], [],
        {
            "MemoryTransceiver": transceiver,
            "MemoryMachineGraph": graph,
            "MemoryPlacements": placements,
            "IPAddress": "testing",
            "ApplicationDataFolder": data_folder,
            "APPID": 30
        },
        [], [], [],
        xml_paths=get_front_end_common_pacman_xml_paths())
    executor.execute_mapping()

    assert (vertex._test_tag == vertex._tag)
"C:\\Python27\\lib\\site-packages\\spynnaker\\pyNN\\model_binaries\\IF_curr_exp.aplx", CoreSubsets([ CoreSubset(0, 0, [ 2, ]), ])) binaries.add_subsets( "C:\\Python27\\lib\\site-packages\\spinn_front_end_common\\common_model_binaries\\reverse_iptag_multicast_source.aplx", CoreSubsets([ CoreSubset(0, 0, [ 1, ]), ])) vertex = ReloadBufferedVertex("inputSpikes_On:0:255", [(2, "inputSpikes_On_0_255_2", 1048576)]) buffered_placements.add_placement(Placement(vertex, 0, 0, 1)) buffered_tags.add_ip_tag(IPTag("192.168.240.253", 0, "0.0.0.0", 17896, True), vertex) reloader = Reload(machine_name, machine_version, reports_states, bmp_details, down_chips, down_cores, number_of_boards, height, width, auto_detect_bmp, enable_reinjection) if len(socket_addresses) > 0: reloader.execute_notification_protocol_read_messages( socket_addresses, None, os.path.join(os.path.dirname(os.path.abspath(__file__)), "input_output_database.db")) reloader.reload_application_data(application_data) reloader.reload_routes(routing_tables) reloader.reload_tags(iptags, reverse_iptags) reloader.reload_binaries(binaries)
def test_write_synaptic_matrix_and_master_population_table(self):
    """ Write a master population table plus synaptic matrices for two\
        one-to-one projections and one all-to-all projection via a data\
        specification, read them back through a mock transceiver, and\
        check the decoded connections.
    """
    MockSimulator.setup()

    # Load the default sPyNNaker config, then force a small direct-matrix
    # budget so that only the first one-to-one matrix can be direct
    default_config_paths = os.path.join(
        os.path.dirname(abstract_spinnaker_common.__file__),
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME)
    config = conf_loader.load_config(
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)
    config.set("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)
    machine_time_step = 1000.0

    # 10-atom pre and post populations, each on a single machine vertex
    pre_app_vertex = SimpleApplicationVertex(10)
    pre_vertex = SimpleMachineVertex(resources=None)
    pre_vertex_slice = Slice(0, 9)
    post_app_vertex = SimpleApplicationVertex(10)
    post_vertex = SimpleMachineVertex(resources=None)
    post_vertex_slice = Slice(0, 9)
    post_slice_index = 0

    # Three connectors with distinct weights/delays so the decoded
    # connections can be told apart below
    one_to_one_connector_1 = OneToOneConnector(None)
    one_to_one_connector_1.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    one_to_one_connector_1.set_weights_and_delays(1.5, 1.0)
    one_to_one_connector_2 = OneToOneConnector(None)
    one_to_one_connector_2.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    one_to_one_connector_2.set_weights_and_delays(2.5, 2.0)
    all_to_all_connector = AllToAllConnector(None)
    all_to_all_connector.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    all_to_all_connector.set_weights_and_delays(4.5, 4.0)
    direct_synapse_information_1 = SynapseInformation(
        one_to_one_connector_1, SynapseDynamicsStatic(), 0)
    direct_synapse_information_2 = SynapseInformation(
        one_to_one_connector_2, SynapseDynamicsStatic(), 1)
    all_to_all_synapse_information = SynapseInformation(
        all_to_all_connector, SynapseDynamicsStatic(), 0)

    # One application edge carrying all three synapse informations,
    # mapped to a single machine edge
    app_edge = ProjectionApplicationEdge(
        pre_app_vertex, post_app_vertex, direct_synapse_information_1)
    app_edge.add_synapse_information(direct_synapse_information_2)
    app_edge.add_synapse_information(all_to_all_synapse_information)
    machine_edge = ProjectionMachineEdge(
        app_edge.synapse_information, pre_vertex, post_vertex)
    partition_name = "TestPartition"
    graph = MachineGraph("Test")
    graph.add_vertex(pre_vertex)
    graph.add_vertex(post_vertex)
    graph.add_edge(machine_edge, partition_name)
    graph_mapper = GraphMapper()
    graph_mapper.add_vertex_mapping(
        pre_vertex, pre_vertex_slice, pre_app_vertex)
    graph_mapper.add_vertex_mapping(
        post_vertex, post_vertex_slice, post_app_vertex)
    graph_mapper.add_edge_mapping(machine_edge, app_edge)
    weight_scales = [4096.0, 4096.0]
    key = 0
    routing_info = RoutingInfo()
    routing_info.add_partition_info(PartitionRoutingInfo(
        [BaseKeyAndMask(key, 0xFFFFFFF0)],
        graph.get_outgoing_edge_partition_starting_at_vertex(
            pre_vertex, partition_name)))

    # Write the master population table (region 0) and synaptic matrix
    # (region 1) through a data specification into a temporary file
    temp_spec = tempfile.mktemp()
    spec_writer = FileDataWriter(temp_spec)
    spec = DataSpecificationGenerator(spec_writer, None)
    master_pop_sz = 1000
    master_pop_region = 0
    all_syn_block_sz = 2000
    synapse_region = 1
    spec.reserve_memory_region(master_pop_region, master_pop_sz)
    spec.reserve_memory_region(synapse_region, all_syn_block_sz)
    synapse_type = MockSynapseType()
    synaptic_manager = SynapticManager(
        synapse_type=synapse_type, ring_buffer_sigma=5.0,
        spikes_per_second=100.0, config=config)
    synaptic_manager._write_synaptic_matrix_and_master_population_table(
        spec, [post_vertex_slice], post_slice_index, post_vertex,
        post_vertex_slice, all_syn_block_sz, weight_scales,
        master_pop_region, synapse_region, routing_info, graph_mapper,
        graph, machine_time_step)
    spec.end_specification()
    spec_writer.close()

    # Execute the spec and lay region 0 then region 1 out contiguously,
    # as they would appear in SDRAM
    spec_reader = FileDataReader(temp_spec)
    executor = DataSpecificationExecutor(
        spec_reader, master_pop_sz + all_syn_block_sz)
    executor.execute()
    master_pop_table = executor.get_region(0)
    synaptic_matrix = executor.get_region(1)
    all_data = bytearray()
    all_data.extend(master_pop_table.region_data[
        :master_pop_table.max_write_pointer])
    all_data.extend(synaptic_matrix.region_data[
        :synaptic_matrix.max_write_pointer])
    master_pop_table_address = 0
    synaptic_matrix_address = master_pop_table.max_write_pointer
    # First word of the matrix region holds the direct-matrix offset;
    # the +8 / +4 adjustments skip the region's header words
    # (NOTE(review): header layout inferred from offsets — confirm
    # against the synaptic matrix region format)
    direct_synapses_address = struct.unpack_from(
        "<I", synaptic_matrix.region_data)[0]
    direct_synapses_address += synaptic_matrix_address + 8
    indirect_synapses_address = synaptic_matrix_address + 4
    placement = Placement(None, 0, 0, 1)
    transceiver = MockTransceiverRawData(all_data)

    # Get the master population table details
    items = synaptic_manager._poptable_type\
        .extract_synaptic_matrix_data_location(
            key, master_pop_table_address, transceiver,
            placement.x, placement.y)

    # The first entry should be direct, but the rest should be indirect;
    # the second is potentially direct, but has been restricted by the
    # restriction on the size of the direct matrix
    assert len(items) == 3

    # TODO: This has been changed because direct matrices are disabled!
    assert not items[0][2]
    assert not items[1][2]
    assert not items[2][2]

    # Read back and decode the first one-to-one matrix
    data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address, key=key,
        n_rows=pre_vertex_slice.n_atoms, index=0,
        using_extra_monitor_cores=False)
    connections_1 = synaptic_manager._synapse_io.read_synapses(
        direct_synapse_information_1, pre_vertex_slice,
        post_vertex_slice, row_len_1, 0, 2, weight_scales, data_1, None,
        app_edge.n_delay_stages, machine_time_step)

    # The first matrix is a 1-1 matrix, so row length is 1
    assert row_len_1 == 1

    # Check that all the connections have the right weight and delay
    assert len(connections_1) == post_vertex_slice.n_atoms
    assert all([conn["weight"] == 1.5 for conn in connections_1])
    assert all([conn["delay"] == 1.0 for conn in connections_1])

    # Read back and decode the second one-to-one matrix
    data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address, key=key,
        n_rows=pre_vertex_slice.n_atoms, index=1,
        using_extra_monitor_cores=False)
    connections_2 = synaptic_manager._synapse_io.read_synapses(
        direct_synapse_information_2, pre_vertex_slice,
        post_vertex_slice, row_len_2, 0, 2, weight_scales, data_2, None,
        app_edge.n_delay_stages, machine_time_step)

    # The second matrix is a 1-1 matrix, so row length is 1
    assert row_len_2 == 1

    # Check that all the connections have the right weight and delay
    assert len(connections_2) == post_vertex_slice.n_atoms
    assert all([conn["weight"] == 2.5 for conn in connections_2])
    assert all([conn["delay"] == 2.0 for conn in connections_2])

    # Read back and decode the all-to-all matrix
    data_3, row_len_3 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address, key=key,
        n_rows=pre_vertex_slice.n_atoms, index=2,
        using_extra_monitor_cores=False)
    connections_3 = synaptic_manager._synapse_io.read_synapses(
        all_to_all_synapse_information, pre_vertex_slice,
        post_vertex_slice, row_len_3, 0, 2, weight_scales, data_3, None,
        app_edge.n_delay_stages, machine_time_step)

    # The third matrix is an all-to-all matrix, so length is n_atoms
    assert row_len_3 == post_vertex_slice.n_atoms

    # Check that all the connections have the right weight and delay
    assert len(connections_3) == \
        post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
    assert all([conn["weight"] == 4.5 for conn in connections_3])
    assert all([conn["delay"] == 4.0 for conn in connections_3])
def placement_from_json(json_dict, graph=None):
    """ Reconstruct a Placement from its JSON description.

    :param json_dict: dict with "vertex_label", "x", "y" and "p" entries
    :param graph: optional graph used to look the vertex up by label
    :return: the rebuilt placement
    """
    target = vertex_lookup(json_dict["vertex_label"], graph)
    # Coordinates and processor id are stored as strings/numbers in json
    x, y, p = (int(json_dict[field]) for field in ("x", "y", "p"))
    return Placement(target, x, y, p)
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ]), ])) vertex = ReloadBufferedVertex("Population 2:0:0", [(2, "Population 2_0_0_2", 1048576)]) buffered_placements.add_placement(Placement(vertex, 0, 0, 1)) buffered_tags.add_ip_tag(IPTag("192.168.240.253", 0, "0.0.0.0", 17896, True), vertex) vertex = ReloadBufferedVertex("Population 3:0:0", [(2, "Population 3_0_0_2", 1048576)]) buffered_placements.add_placement(Placement(vertex, 0, 0, 2)) buffered_tags.add_ip_tag(IPTag("192.168.240.253", 0, "0.0.0.0", 17896, True), vertex) vertex = ReloadBufferedVertex("Population 6:0:0", [(2, "Population 6_0_0_2", 1048576)]) buffered_placements.add_placement(Placement(vertex, 0, 0, 3)) buffered_tags.add_ip_tag(IPTag("192.168.240.253", 0, "0.0.0.0", 17896, True), vertex) vertex = ReloadBufferedVertex("Population 7:0:0", [(2, "Population 7_0_0_2", 1048576)]) buffered_placements.add_placement(Placement(vertex, 0, 0, 4))
def __call__(self, placements, allocations, partitioned_graph,
             extended_machine, constraints):
    """ Convert file-based placements and core allocations into a\
        memory Placements object.

    :param placements: path to the json placements file
    :param allocations: path to the json core allocations file
    :param partitioned_graph: the graph used to resolve vertex ids
    :param extended_machine: machine including any virtual chips
    :param constraints: path to the json constraints file
    :return: dict with the built "placements"
    :raise exceptions.PacmanConfigurationException: if a vertex id has no\
        core allocation and no recognisable external-device constraints,\
        or if an allocated vertex id is not in the graph
    """
    # load the json files
    file_placements, core_allocations, constraints = \
        self._load_json_files(placements, allocations, constraints)

    # validate the json files against the schemas
    self._validate_file_read_data(
        file_placements, core_allocations, constraints)

    # drop the type and allocations bit of core allocations
    # (makes lower code simpler)
    core_allocations = core_allocations['allocations']

    memory_placements = Placements()

    # process placements
    for vertex_id in file_placements:
        subvertex = partitioned_graph.get_subvertex_by_id(vertex_id)
        if vertex_id not in core_allocations:
            if subvertex is not None:

                # virtual chip or tag chip
                constraints_for_vertex = self._locate_constraints(
                    vertex_id, constraints)
                external_device_constraints = \
                    self._valid_constraints_for_external_device(
                        constraints_for_vertex)
                if len(external_device_constraints) != 0:

                    # get data for virtual chip
                    route_constraint = \
                        external_device_constraints['end_point']
                    route_direction = constants.EDGES(
                        route_constraint['direction'].upper())
                    placement_constraint = \
                        external_device_constraints['placement']
                    coords = placement_constraint['location']

                    # locate virtual chip via the link out of the real chip
                    link = extended_machine.get_chip_at(
                        coords[0], coords[1]).router.get_link(
                            route_direction.value)
                    destination_chip = extended_machine.get_chip_at(
                        link.destination_x, link.destination_y)

                    # create placement
                    # BUG FIX: this was placements.add_placement, but
                    # "placements" is the json file path input; the new
                    # placement belongs in memory_placements
                    memory_placements.add_placement(
                        Placement(subvertex, destination_chip.x,
                                  destination_chip.y, None))
                else:
                    raise exceptions.PacmanConfigurationException(
                        "I don't recognise this pattern of constraints for"
                        " a vertex which does not have a placement")
        else:
            if subvertex is None:
                raise exceptions.PacmanConfigurationException(
                    "Failed to locate the partitioned vertex in the "
                    "partitioned graph with label {}".format(vertex_id))
            else:
                memory_placements.add_placement(
                    Placement(x=file_placements[vertex_id][0],
                              y=file_placements[vertex_id][1],
                              p=core_allocations[vertex_id][0],
                              subvertex=subvertex))

    # return the file format
    return {"placements": memory_placements}