def test_virtual_vertices_spreader():
    """ Test that the placer works with a virtual vertex
    """
    # Create a graph with a virtual vertex
    machine_graph = MachineGraph("Test")
    virtual_vertex = MachineSpiNNakerLinkVertex(
        spinnaker_link_id=0, label="Virtual")
    machine_graph.add_vertex(virtual_vertex)

    # These vertices are fixed on 0, 0
    misc_vertices = list()
    for i in range(3):
        misc_vertex = SimpleMachineVertex(
            resources=ResourceContainer(),
            constraints=[ChipAndCoreConstraint(0, 0)],
            label="Fixed_0_0_{}".format(i))
        machine_graph.add_vertex(misc_vertex)
        misc_vertices.append(misc_vertex)

    # These vertices are 1-1 connected to the virtual vertex
    one_to_one_vertices = list()
    for i in range(16):
        one_to_one_vertex = SimpleMachineVertex(
            resources=ResourceContainer(),
            label="Vertex_{}".format(i))
        machine_graph.add_vertex(one_to_one_vertex)
        edge = MachineEdge(virtual_vertex, one_to_one_vertex)
        machine_graph.add_edge(edge, "SPIKES")
        one_to_one_vertices.append(one_to_one_vertex)

    n_keys_map = DictBasedMachinePartitionNKeysMap()
    partition = machine_graph.get_outgoing_edge_partition_starting_at_vertex(
        virtual_vertex, "SPIKES")
    n_keys_map.set_n_keys_for_partition(partition, 1)

    # Get and extend the machine for the virtual chip
    machine = virtual_machine(width=8, height=8)
    extended_machine = MallocBasedChipIdAllocator()(machine, machine_graph)

    # Do placements
    placements = SpreaderPlacer()(
        machine_graph, extended_machine, n_keys_map, plan_n_timesteps=1000)

    # The virtual vertex should be on a virtual chip
    placement = placements.get_placement_of_vertex(virtual_vertex)
    assert machine.get_chip_at(placement.x, placement.y).virtual

    # The 0, 0 vertices should be on 0, 0
    for vertex in misc_vertices:
        placement = placements.get_placement_of_vertex(vertex)
        assert placement.x == placement.y == 0

    # The other vertices should *not* be on a virtual chip
    for vertex in one_to_one_vertices:
        placement = placements.get_placement_of_vertex(vertex)
        assert not machine.get_chip_at(placement.x, placement.y).virtual
def test_one_to_one():
    """ Test normal 1-1 placement
    """
    # Create a graph
    machine_graph = MachineGraph("Test")

    # Connect sets of vertices in chains of length 3
    one_to_one_chains = list()
    for i in range(10):
        last_vertex = None
        chain = list()
        for j in range(3):
            vertex = SimpleMachineVertex(
                resources=ResourceContainer(),
                label="Vertex_{}_{}".format(i, j))
            machine_graph.add_vertex(vertex)
            if last_vertex is not None:
                edge = MachineEdge(last_vertex, vertex)
                machine_graph.add_edge(edge, "SPIKES")
            last_vertex = vertex
            chain.append(vertex)
        one_to_one_chains.append(chain)

    # Connect a set of 20 vertices in a chain
    too_many_vertices = list()
    last_vertex = None
    for i in range(20):
        vertex = SimpleMachineVertex(
            resources=ResourceContainer(), label="Vertex_{}".format(i))
        machine_graph.add_vertex(vertex)
        if last_vertex is not None:
            edge = MachineEdge(last_vertex, vertex)
            machine_graph.add_edge(edge, "SPIKES")
        too_many_vertices.append(vertex)
        last_vertex = vertex

    # Do placements
    machine = virtual_machine(width=8, height=8)
    placements = OneToOnePlacer()(
        machine_graph, machine, plan_n_timesteps=1000)

    # The 1-1 connected vertices should be on the same chip
    for chain in one_to_one_chains:
        first_placement = placements.get_placement_of_vertex(chain[0])
        for i in range(1, 3):
            placement = placements.get_placement_of_vertex(chain[i])
            assert placement.x == first_placement.x
            assert placement.y == first_placement.y

    # The other vertices should be on more than one chip
    too_many_chips = set()
    for vertex in too_many_vertices:
        placement = placements.get_placement_of_vertex(vertex)
        too_many_chips.add((placement.x, placement.y))
    assert len(too_many_chips) > 1
def test_sdram_links():
    """ Test that SDRAM edges cause the placer to fail
    """
    # Create a graph
    machine_graph = MachineGraph("Test")

    # Add a set of 20 vertices
    last_vertex = None
    for x in range(20):
        vertex = SimpleMachineVertex(
            resources=ResourceContainer(), label="Vertex_{}".format(x),
            sdram_cost=20)
        machine_graph.add_vertex(vertex)
        last_vertex = vertex

    # Give every vertex an SDRAM partition and an SDRAM edge to the last one
    for vertex in machine_graph.vertices:
        machine_graph.add_outgoing_edge_partition(
            ConstantSDRAMMachinePartition(
                identifier="SDRAM", pre_vertex=vertex, label="bacon"))
        edge = SDRAMMachineEdge(vertex, last_vertex, "bacon", app_edge=None)
        machine_graph.add_edge(edge, "SDRAM")

    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # Do placements; this is expected to fail with a PacmanException
    machine = virtual_machine(width=8, height=8)
    try:
        SpreaderPlacer()(
            machine_graph, machine, n_keys_map, plan_n_timesteps=1000)
        raise Exception("should blow up here")
    except PacmanException:
        pass
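# Note: when these tests run under pytest, the manual try/except-and-raise
# pattern above can also be written with pytest.raises. A minimal,
# self-contained sketch follows; the stand-in exception and placer call are
# assumptions made purely so the snippet runs on its own, they are not the
# PACMAN API.
import pytest


class _StandInPacmanException(Exception):
    """Stand-in for PacmanException, for illustration only."""


def _stand_in_placer_call():
    # Represents the placer call that is expected to fail
    raise _StandInPacmanException("SDRAM partitions are not supported")


def test_expected_failure_with_pytest_raises():
    # pytest.raises asserts that the block raises the given exception type
    with pytest.raises(_StandInPacmanException):
        _stand_in_placer_call()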
def resources_required(self):
    # system region
    sdram = constants.SYSTEM_BYTES_REQUIREMENT

    # params + cilia + inner ear + seeds + sdram edge + DT elements +
    # synapse
    sdram_params = (
        self._N_PARAMETERS + self._N_CILIA_PARAMS + self._N_DT_PARAMS +
        self._N_INNER_EAR_PARAM_PARAMS + self._N_SDRAM_EDGE_PARAMS +
        self.N_SEEDS_PER_IHCAN_VERTEX)
    sdram += sdram_params * constants.WORD_TO_BYTE_MULTIPLIER

    # profile region
    sdram += self._profile_size()

    # provenance region
    sdram += self.get_provenance_data_size(
        self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PROVENANCE_ELEMENTS.value)

    # recording region
    sdram += self._ihcan_neuron_recorder.get_sdram_usage_in_bytes(
        self._ihcan_recording_atom_slice)
    variable_sdram = self._ihcan_neuron_recorder.get_variable_sdram_usage(
        self._ihcan_recording_atom_slice)

    # combine the fixed regions with the per-timestep recording requirement
    resources = ResourceContainer(
        dtcm=DTCMResource(0),
        sdram=variable_sdram + ConstantSDRAM(sdram),
        cpu_cycles=CPUCyclesPerTickResource(0),
        iptags=[], reverse_iptags=[])
    return resources
def get_maximum_resources_available(self, chips=None, processor_id=None,
                                    board_address=None, ip_tags=None,
                                    reverse_ip_tags=None):
    """ Get the maximum resources available

    :param chips: An iterable of (x, y) tuples of chips that are to be used
    :type chips: iterable of (int, int)
    :param processor_id: the processor ID
    :type processor_id: int
    :param board_address: the board address for locating max resources from
    :type board_address: str
    :param ip_tags: iterable of IP tag constraints
    :type ip_tags: iterable of\
        :py:class:`pacman.model.constraints.tag_allocator_constraints.tag_allocator_require_iptag_constraint.TagAllocatorRequireIptagConstraint`
    :param reverse_ip_tags: iterable of reverse IP tag constraints
    :type reverse_ip_tags: iterable of\
        :py:class:`pacman.model.constraints.tag_allocator_constraints.tag_allocator_require_reverse_iptag_constraint.TagAllocatorRequireReverseIptagConstraint`
    :return: a resource container describing the maximum resources available
    :rtype: ResourceContainer
    """
    usable_chips = self._get_usable_chips(
        chips, board_address, ip_tags, reverse_ip_tags)

    # If the chip is not fixed, find the maximum SDRAM
    # TODO: Also check for the best core
    max_sdram_available = 0
    max_dtcm_available = 0
    max_cpu_available = 0
    for (chip_x, chip_y) in usable_chips:
        key = (chip_x, chip_y)
        chip = self._machine.get_chip_at(chip_x, chip_y)
        sdram_available = self._sdram_available(chip, key)
        ip_tags_available = self._are_ip_tags_available(
            chip, board_address, ip_tags)
        reverse_ip_tags_available = self._are_reverse_ip_tags_available(
            chip, board_address, reverse_ip_tags)

        if (sdram_available > max_sdram_available and
                ip_tags_available and reverse_ip_tags_available):
            max_sdram_available = sdram_available
            best_processor_id = self._best_core_available(
                chip, key, processor_id)
            processor = chip.get_processor_with_id(best_processor_id)
            max_dtcm_available = processor.dtcm_available
            max_cpu_available = processor.cpu_cycles_available

        # If all the SDRAM on the chip is available, the chip is
        # unallocated, so its SDRAM is the maximum possible
        # TODO: This assumes that the chips are all the same
        if sdram_available == chip.sdram.size:
            break

    # Return the maximums
    return ResourceContainer(
        DTCMResource(max_dtcm_available),
        SDRAMResource(max_sdram_available),
        CPUCyclesPerTickResource(max_cpu_available))
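# The scan above keeps the largest SDRAM figure seen so far and stops early
# once a fully free chip is found. A condensed, self-contained sketch of that
# selection logic, using plain tuples with hypothetical figures in place of
# the machine's chip objects:
def _max_sdram_available(chips):
    # chips: iterable of (x, y, sdram_available, sdram_total) tuples
    max_sdram_available = 0
    for _x, _y, sdram_available, sdram_total in chips:
        if sdram_available > max_sdram_available:
            max_sdram_available = sdram_available
        # A fully unallocated chip cannot be beaten (assuming all chips have
        # the same amount of SDRAM), so the search can stop here
        if sdram_available == sdram_total:
            break
    return max_sdram_available


# Example: the second chip is fully free, so the loop exits early
print(_max_sdram_available([
    (0, 0, 40000000, 119000000),
    (0, 1, 119000000, 119000000),
    (1, 0, 60000000, 119000000)]))  # -> 119000000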
def get_resources_used_by_atoms(self, vertex_slice):
    # **HACK** only way to force no partitioning is to zero dtcm and cpu
    container = ResourceContainer(
        sdram=SDRAMResource(
            self.BANDIT_REGION_BYTES +
            front_end_common_constants.SYSTEM_BYTES_REQUIREMENT),
        dtcm=DTCMResource(0),
        cpu_cycles=CPUCyclesPerTickResource(0))
    return container
def resources_required(self):
    sdram = self._N_PARAMETER_BYTES + self._data_size
    sdram += 1 * self._KEY_ELEMENT_TYPE.size

    resources = ResourceContainer(
        dtcm=DTCMResource(0), sdram=SDRAMResource(sdram),
        cpu_cycles=CPUCyclesPerTickResource(0),
        iptags=[], reverse_iptags=[])
    return resources
def resources_required(self, graph, default_machine_time_step):
    # system region
    sdram = constants.SYSTEM_BYTES_REQUIREMENT

    # sdram edge address store
    sdram += (self.SDRAM_EDGE_ADDRESS_SIZE_IN_WORDS *
              constants.WORD_TO_BYTE_MULTIPLIER)

    # bitfield region
    sdram += bit_field_utilities.get_estimated_sdram_for_bit_field_region(
        graph, self)

    # bitfield key map region
    sdram += bit_field_utilities.get_estimated_sdram_for_key_region(
        graph, self)

    # bitfield builder region
    sdram += bit_field_utilities.exact_sdram_for_bit_field_builder_region()

    # the actual size needed by the sdram edge
    sdram += self._sdram_edge_size

    # filter params
    sdram += self.FILTER_PARAMS_IN_BYTES

    # params
    sdram += self._N_PARAMETER_BYTES

    # double params
    sdram += self._N_DOUBLE_PARAMS_BYTES

    # profile
    sdram += self._profile_size()

    # synapses
    sdram += self._synapse_manager.get_sdram_usage_in_bytes(
        Slice(self._drnl_index, self._drnl_index + 1),
        graph.get_edges_ending_at_vertex(self._parent),
        default_machine_time_step)

    # recording stuff
    sdram += self._neuron_recorder.get_sdram_usage_in_bytes(
        Slice(self._drnl_index, self._drnl_index))

    # find variable sdram
    variable_sdram = self._neuron_recorder.get_variable_sdram_usage(
        Slice(self._drnl_index, self._drnl_index))

    resources = ResourceContainer(
        dtcm=DTCMResource(0),
        sdram=variable_sdram + ConstantSDRAM(sdram),
        cpu_cycles=CPUCyclesPerTickResource(0),
        iptags=[], reverse_iptags=[])
    return resources
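# The SDRAM requirement returned above is a fixed part (sdram) plus a
# per-timestep recording part (variable_sdram). A self-contained sketch of
# how such a split is totalled over a planned number of timesteps; the two
# classes below are illustrative stand-ins, not the PACMAN resource types:
class _FixedSDRAM(object):
    def __init__(self, fixed_bytes):
        self.fixed_bytes = fixed_bytes

    def total(self, n_timesteps):
        # A fixed cost does not grow with the run length
        return self.fixed_bytes


class _PerTimestepSDRAM(object):
    def __init__(self, fixed_bytes, bytes_per_timestep):
        self.fixed_bytes = fixed_bytes
        self.bytes_per_timestep = bytes_per_timestep

    def total(self, n_timesteps):
        # Recording space grows with the number of timesteps planned for
        return self.fixed_bytes + self.bytes_per_timestep * n_timesteps


# e.g. 4096 bytes of static regions plus 16 bytes recorded per timestep,
# totalled for a 1000-timestep plan
print(_FixedSDRAM(4096).total(1000)
      + _PerTimestepSDRAM(0, 16).total(1000))  # -> 20096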
def get_resources_used_by_atoms(self, vertex_slice, graph):
    """ Get the separate resource requirements for a range of atoms

    :param vertex_slice: the slice of atoms to calculate resources for
    :type vertex_slice: pacman.model.graph_mapper.slice.Slice
    :param graph: A reference to the graph containing this vertex.
    :return: a ResourceContainer that contains a\
        CPUCyclesPerTickResource, DTCMResource and SDRAMResource
    :rtype: ResourceContainer
    :raise None: this method does not raise any known exception
    """
    cpu_cycles = self.get_cpu_usage_for_atoms(vertex_slice, graph)
    dtcm_requirement = self.get_dtcm_usage_for_atoms(vertex_slice, graph)
    sdram_requirement = self.get_sdram_usage_for_atoms(vertex_slice, graph)

    # noinspection PyTypeChecker
    resources = ResourceContainer(
        cpu=CPUCyclesPerTickResource(cpu_cycles),
        dtcm=DTCMResource(dtcm_requirement),
        sdram=SDRAMResource(sdram_requirement))
    return resources
def __init__(
        self, label, machine_time_step, timescale_factor, use_prefix=False,
        key_prefix=None, prefix_type=None, message_type=EIEIOType.KEY_32_BIT,
        right_shift=0, payload_as_time_stamps=True, use_payload_prefix=True,
        payload_prefix=None, payload_right_shift=0,
        number_of_packets_sent_per_time_step=0, ip_address=None, port=None,
        strip_sdp=None, board_address=None, tag=None, constraints=None):
    resources_required = ResourceContainer(
        cpu=CPUCyclesPerTickResource(self.get_cpu_usage()),
        dtcm=DTCMResource(self.get_dtcm_usage()),
        sdram=SDRAMResource(self.get_sdram_usage()))

    if constraints is None:
        constraints = self.get_constraints(
            ip_address, port, strip_sdp, board_address, tag)

    PartitionedVertex.__init__(
        self, resources_required, label, constraints=constraints)
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, self._LIVE_DATA_GATHER_REGIONS.PROVENANCE.value,
        self.N_ADDITIONAL_PROVENANCE_ITEMS)
    AbstractPartitionedDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)

    self._use_prefix = use_prefix
    self._key_prefix = key_prefix
    self._prefix_type = prefix_type
    self._message_type = message_type
    self._right_shift = right_shift
    self._payload_as_time_stamps = payload_as_time_stamps
    self._use_payload_prefix = use_payload_prefix
    self._payload_prefix = payload_prefix
    self._payload_right_shift = payload_right_shift
    self._number_of_packets_sent_per_time_step = \
        number_of_packets_sent_per_time_step
def resources_required(self):
    # system region
    sdram = constants.SYSTEM_BYTES_REQUIREMENT

    # params
    sdram += self._N_PARAMETER_BYTES

    # concha params
    sdram += self._N_CONCHA_PARAMS_BYTES

    # data
    sdram += self._data_size

    # filter coeffs
    sdram += self._N_FILTER_COEFFS_BYTES

    # profile
    sdram += self._profile_size()

    # provenance region
    sdram += self.get_provenance_data_size(
        self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PROVENANCE_ELEMENTS.value)

    resources = ResourceContainer(
        dtcm=DTCMResource(0), sdram=ConstantSDRAM(sdram),
        cpu_cycles=CPUCyclesPerTickResource(0),
        iptags=[], reverse_iptags=[])
    return resources
def resources_required(self):
    resources = ResourceContainer(
        sdram=ConstantSDRAM(SYSTEM_BYTES_REQUIREMENT + self._sdram_usage))
    return resources
def get_resources_used_by_atoms(self, vertex_slice):
    return ResourceContainer()
def resources_required(self):
    resources = ResourceContainer(sdram=ConstantSDRAM(self.sdram_fixed))
    return resources
def resources_required(self):
    return ResourceContainer()