def __init__(self, send_buffers, resources_required, label, constraints):
    """ Create a partitioned vertex that sends pre-computed buffers from\
        the host and needs routing information before it can run.

    :param send_buffers: the pre-buffered regions to be sent from the host
    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param constraints: the constraints of this subvertex
    """
    # Initialise every superclass in turn
    PartitionedVertex.__init__(self, resources_required, label, constraints)
    RequiresRoutingInfoPartitionedVertex.__init__(self)
    SendsBuffersFromHostPartitionedVertexPreBufferedImpl.__init__(
        self, send_buffers)

    # Base routing key - unknown until routing information is provided
    self._base_key = None
def __init__(
        self, buffering_output, resources_required, label,
        constraints=None):
    """ Create a partitioned vertex which may buffer its output back to\
        the host.

    :param buffering_output: True if the vertex is set to buffer output,\
        False otherwise
    :param resources_required: The approximate resources needed for\
        the vertex
    :type resources_required:\
        :py:class:`pacman.models.resources.resource_container.ResourceContainer`
    :param label: The name of the subvertex
    :type label: str
    :param constraints: The constraints of the subvertex
    :type constraints: iterable of\
        :py:class:`pacman.model.constraints.abstract_constraint\
        .AbstractConstraint`
    :raise pacman.exceptions.PacmanInvalidParameterException:
        * If one of the constraints is not valid
    """
    AbstractReceiveBuffersToHost.__init__(self)
    PartitionedVertex.__init__(
        self, resources_required=resources_required, label=label,
        constraints=constraints)

    # Whether this vertex streams its recorded output back to the host
    self._buffering_output = buffering_output
def __init__(self, resources_required, label, constraints=None):
    """ Create a delay extension subvertex that reports provenance data\
        gathered on the machine.

    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param constraints: the constraints of this subvertex
    """
    PartitionedVertex.__init__(
        self, resources_required, label, constraints=constraints)

    # Provenance is read from the delay extension's provenance region;
    # no additional provenance items beyond the standard set
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, self._DELAY_EXTENSION_REGIONS.PROVENANCE_REGION.value, 0)
def __init__(self, send_buffers, resources_required, label, constraints):
    """ Create a partitioned vertex that sends pre-buffered spikes from\
        the host and supports EIEIO spike recording.

    :param send_buffers: the pre-buffered regions to be sent from the host
    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param constraints: the constraints of this subvertex
    """
    # Initialise every superclass in turn
    PartitionedVertex.__init__(self, resources_required, label, constraints)
    RequiresRoutingInfoPartitionedVertex.__init__(self)
    SendsBuffersFromHostPartitionedVertexPreBufferedImpl.__init__(
        self, send_buffers)
    AbstractEIEIOSpikeRecordable.__init__(self)

    # Routing key and recording-region size, filled in later
    self._base_key = None
    self._region_size = None
def __init__(
        self, resources_required, label, is_recording, constraints=None):
    """ Create a Poisson spike source subvertex.

    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param is_recording: True if the spikes are to be recorded
    :param constraints: the constraints of this subvertex
    """
    PartitionedVertex.__init__(
        self, resources_required, label, constraints=constraints)
    ReceiveBuffersToHostBasicImpl.__init__(self)

    # Provenance lives in the Poisson source's own region; no extra items
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, self._POISSON_SPIKE_SOURCE_REGIONS.PROVENANCE_REGION.value, 0)
    AbstractRecordable.__init__(self)
    self._is_recording = is_recording
def __init__(
        self, resources_required, label, is_recording, constraints=None):
    """ Create a population subvertex that can record and that reports\
        provenance data from the machine.

    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param is_recording: True if this subvertex is set to record
    :param constraints: the constraints of this subvertex
    """
    PartitionedVertex.__init__(
        self, resources_required, label, constraints)
    ReceiveBuffersToHostBasicImpl.__init__(self)

    # Provenance region shared by population-based vertices; the number of
    # extra items is declared on the (sub)class
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, constants.POPULATION_BASED_REGIONS.PROVENANCE_DATA.value,
        self.N_ADDITIONAL_PROVENANCE_DATA_ITEMS)
    AbstractRecordable.__init__(self)
    self._is_recording = is_recording
def __init__(self, resources_required, label, is_recording,
             constraints=None):
    """ Create a recordable population subvertex with machine provenance.

    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param is_recording: True if this subvertex is set to record
    :param constraints: the constraints of this subvertex
    """
    # Superclass initialisation, in dependency order
    PartitionedVertex.__init__(self, resources_required, label, constraints)
    ReceiveBuffersToHostBasicImpl.__init__(self)
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, constants.POPULATION_BASED_REGIONS.PROVENANCE_DATA.value,
        self.N_ADDITIONAL_PROVENANCE_DATA_ITEMS)
    AbstractRecordable.__init__(self)

    # Remember whether recording was requested
    self._is_recording = is_recording
def __init__(self, resources_required, label, spinnaker_link_id,
             constraints=None):
    """ Create a virtual subvertex attached to a SpiNNaker link.

    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param spinnaker_link_id: the id of the SpiNNaker link this vertex\
        is attached to
    :param constraints: the constraints of this subvertex
    """
    PartitionedVertex.__init__(self, resources_required, label,
                               constraints=constraints)
    self._spinnaker_link_id = spinnaker_link_id

    # Virtual chip coordinates and the real chip/link the virtual chip is
    # connected through - all assigned later during mapping
    self._virtual_chip_x = None
    self._virtual_chip_y = None
    self._real_chip_x = None
    self._real_chip_y = None
    self._real_link = None
def create_subvertex(self, vertex_slice, resources_required, label=None,
                     constraints=None):
    """ Create a subvertex of this vertex.  Can be overridden in vertex\
        subclasses to create a subvertex instance that contains detailed\
        information

    :param label: The label to give the subvertex.  If not given, and the\
        vertex has no label, no label will be given to the subvertex.\
        If not given and the vertex has a label, a default label will\
        be given to the subvertex
    :type label: str
    :param resources_required: the SDRAM, DTCM, and ITCM used by this\
        partitioned vertex
    :type resources_required:\
        :py:class:`pacman.model.resources.resource_container.ResourceContainer`
    :param vertex_slice: the slice of the partitionable vertex that this\
        partitioned vertex will cover
    :type vertex_slice: pacman.model.graph_mapper.vertex_slice.VertexSlice
    :param constraints: An iterable of constraints for the subvertex.\
        These are generated by the partitioner from the vertex\
        constraints.
    :type constraints: iterable of\
        :py:class:`pacman.model.constraints.abstract_constraint.AbstractConstraint`
    :raise pacman.exceptions.PacmanInvalidParameterException:
        * If lo_atom or hi_atom are out of range
        * If one of the constraints is invalid
    """
    # NOTE(review): this default implementation ignores vertex_slice -
    # subclasses that need slice information must override this method
    return PartitionedVertex(label=label,
                             resources_required=resources_required,
                             constraints=constraints)
def __init__(
        self, label, machine_time_step, timescale_factor, use_prefix=False,
        key_prefix=None, prefix_type=None,
        message_type=EIEIOType.KEY_32_BIT, right_shift=0,
        payload_as_time_stamps=True, use_payload_prefix=True,
        payload_prefix=None, payload_right_shift=0,
        number_of_packets_sent_per_time_step=0, ip_address=None, port=None,
        strip_sdp=None, board_address=None, tag=None, constraints=None):
    """ Create a partitioned live-output gatherer subvertex.

    :param label: the label of this subvertex
    :param machine_time_step: the time step of the machine
    :param timescale_factor: the time scaling of the simulation
    :param use_prefix: True if a prefix is to be applied to keys
    :param key_prefix: the prefix to apply to keys, if used
    :param prefix_type: where the prefix is applied (upper or lower half)
    :param message_type: the EIEIO message type to send
    :param right_shift: the amount to right-shift keys before sending
    :param payload_as_time_stamps: True if payloads are timestamps
    :param use_payload_prefix: True if a payload prefix is used
    :param payload_prefix: the payload prefix, if used
    :param payload_right_shift: the amount to right-shift payloads
    :param number_of_packets_sent_per_time_step: packets per time step
    :param ip_address: the IP address to send output to
    :param port: the port to send output to
    :param strip_sdp: True if SDP headers are to be stripped
    :param board_address: the board to use for the tag
    :param tag: the IP tag to use
    :param constraints: the constraints of this subvertex; if None, tag\
        constraints are derived from the IP parameters
    """
    # Resources are computed by the (sub)class's usage accessors
    resources_required = ResourceContainer(
        cpu=CPUCyclesPerTickResource(self.get_cpu_usage()),
        dtcm=DTCMResource(self.get_dtcm_usage()),
        sdram=SDRAMResource(self.get_sdram_usage()))

    # When no constraints are given, build the IP tag constraints from
    # the connection parameters instead
    if constraints is None:
        constraints = self.get_constraints(
            ip_address, port, strip_sdp, board_address, tag)

    PartitionedVertex.__init__(
        self, resources_required, label, constraints=constraints)
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, self._LIVE_DATA_GATHER_REGIONS.PROVENANCE.value,
        self.N_ADDITIONAL_PROVENANCE_ITEMS)
    AbstractPartitionedDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)

    # Store the EIEIO message formatting parameters
    self._use_prefix = use_prefix
    self._key_prefix = key_prefix
    self._prefix_type = prefix_type
    self._message_type = message_type
    self._right_shift = right_shift
    self._payload_as_time_stamps = payload_as_time_stamps
    self._use_payload_prefix = use_payload_prefix
    self._payload_prefix = payload_prefix
    self._payload_right_shift = payload_right_shift
    self._number_of_packets_sent_per_time_step = \
        number_of_packets_sent_per_time_step
class TestBasicPlacer(unittest.TestCase):
    """ Tests for the basic placement algorithm
    """

    def setUp(self):
        # --------------------------------------------------------------
        # Setting up vertices, edges and graph
        # --------------------------------------------------------------
        self.vert1 = TestVertex(100, "New AbstractConstrainedTestVertex 1")
        self.vert2 = TestVertex(5, "New AbstractConstrainedTestVertex 2")
        self.vert3 = TestVertex(3, "New AbstractConstrainedTestVertex 3")
        self.edge1 = MultiCastPartitionableEdge(self.vert1, self.vert2,
                                                "First edge")
        self.edge2 = MultiCastPartitionableEdge(self.vert2, self.vert1,
                                                "Second edge")
        self.edge3 = MultiCastPartitionableEdge(self.vert1, self.vert3,
                                                "Third edge")
        self.verts = [self.vert1, self.vert2, self.vert3]
        self.edges = [self.edge1, self.edge2, self.edge3]
        self.graph = PartitionableGraph("Graph", self.verts, self.edges)

        # --------------------------------------------------------------
        # Setting up machine: a 10 x 10 torus of chips, 18 processors
        # each, with wrap-around links
        # --------------------------------------------------------------
        flops = 1000
        (e, ne, n, w, sw, s) = range(6)
        processors = list()
        for i in range(18):
            processors.append(Processor(i, flops))
        _sdram = SDRAM(128 * (2 ** 20))
        ip = "192.168.240.253"
        chips = list()
        for x in range(10):
            for y in range(10):
                links = list()
                links.append(Link(x, y, 0, (x + 1) % 10, y, n, n))
                links.append(Link(x, y, 1, (x + 1) % 10, (y + 1) % 10, s, s))
                links.append(Link(x, y, 2, x, (y + 1) % 10, n, n))
                links.append(Link(x, y, 3, (x - 1) % 10, y, s, s))
                links.append(Link(x, y, 4, (x - 1) % 10, (y - 1) % 10, n, n))
                links.append(Link(x, y, 5, x, (y - 1) % 10, s, s))
                r = Router(links, False, 100, 1024)
                chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))
        self.machine = Machine(chips)

        # --------------------------------------------------------------
        # Setting up subgraph and graph_mapper
        # --------------------------------------------------------------
        # NOTE(review): subvertex1 uses self.vert1.get_resources_used...,
        # the others call a bare get_resources_used_by_atoms - presumably a
        # module-level helper; verify the import
        self.subvertices = list()
        self.subvertex1 = PartitionedVertex(
            0, 1, self.vert1.get_resources_used_by_atoms(0, 1, []),
            "First subvertex")
        self.subvertex2 = PartitionedVertex(
            1, 5, get_resources_used_by_atoms(1, 5, []), "Second subvertex")
        self.subvertex3 = PartitionedVertex(
            5, 10, get_resources_used_by_atoms(5, 10, []), "Third subvertex")
        self.subvertex4 = PartitionedVertex(
            10, 100, get_resources_used_by_atoms(10, 100, []),
            "Fourth subvertex")
        self.subvertices.append(self.subvertex1)
        self.subvertices.append(self.subvertex2)
        self.subvertices.append(self.subvertex3)
        self.subvertices.append(self.subvertex4)
        self.subedges = list()
        self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                         self.subedges)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices)

    @unittest.skip("demonstrating skipping")
    def test_new_basic_placer(self):
        # Construction stores the machine and graph unchanged
        self.bp = BasicPlacer(self.machine, self.graph)
        self.assertEqual(self.bp._machine, self.machine)
        self.assertEqual(self.bp._graph, self.graph)

    @unittest.skip("demonstrating skipping")
    def test_place_where_subvertices_dont_have_vertex(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_place_where_subvertices_have_vertices(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices, self.vert1)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_place_subvertex_too_big_with_vertex(self):
        # A subvertex larger than any chip can hold must raise
        large_vertex = TestVertex(500, "Large vertex 500")
        large_subvertex = large_vertex.create_subvertex(
            0, 499, get_resources_used_by_atoms(0, 499, []))
        # NOTE(review): add_vertex on the old graph is immediately
        # discarded by the re-assignment below - confirm intent
        self.graph.add_vertex(large_vertex)
        self.graph = PartitionableGraph("Graph", [large_vertex])
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices([large_subvertex], large_vertex)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=[large_subvertex])
        with self.assertRaises(PacmanPlaceException):
            placements = self.bp.place(self.subgraph, self.graph_mapper)

    @unittest.skip("demonstrating skipping")
    def test_try_to_place(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_deal_with_constraint_placement_subvertices_dont_have_vertex(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subvertex1.add_constraint(PlacerChipAndCoreConstraint(8, 3, 2))
        self.assertIsInstance(self.subvertex1.constraints[0],
                              PlacerChipAndCoreConstraint)
        self.subvertex2.add_constraint(PlacerChipAndCoreConstraint(3, 5, 7))
        self.subvertex3.add_constraint(PlacerChipAndCoreConstraint(2, 4, 6))
        self.subvertex4.add_constraint(PlacerChipAndCoreConstraint(6, 4, 16))
        self.subvertices = list()
        self.subvertices.append(self.subvertex1)
        self.subvertices.append(self.subvertex2)
        self.subvertices.append(self.subvertex3)
        self.subvertices.append(self.subvertex4)
        self.subedges = list()
        self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                         self.subedges)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_deal_with_constraint_placement_subvertices_have_vertices(self):
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subvertex1.add_constraint(PlacerChipAndCoreConstraint(1, 5, 2))
        self.assertIsInstance(self.subvertex1.constraints[0],
                              PlacerChipAndCoreConstraint)
        self.subvertex2.add_constraint(PlacerChipAndCoreConstraint(3, 5, 7))
        self.subvertex3.add_constraint(PlacerChipAndCoreConstraint(2, 4, 6))
        self.subvertex4.add_constraint(PlacerChipAndCoreConstraint(6, 7, 16))
        self.subvertices = list()
        self.subvertices.append(self.subvertex1)
        self.subvertices.append(self.subvertex2)
        self.subvertices.append(self.subvertex3)
        self.subvertices.append(self.subvertex4)
        self.subedges = list()
        self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                         self.subedges)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(self.subvertices, self.vert1)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_unsupported_non_placer_constraint(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_unsupported_placer_constraint(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_unsupported_placer_constraints(self):
        self.assertEqual(True, False, "Test not implemented yet")

    @unittest.skip("demonstrating skipping")
    def test_many_subvertices(self):
        # 50 atoms per each processor on 20 chips
        subvertices = list()
        for i in range(20 * 17):
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))
        self.graph = PartitionableGraph("Graph", subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
        for placement in placements.placements:
            print placement.subvertex.label, placement.subvertex.n_atoms, \
                'x:', placement.x, 'y:', placement.y, 'p:', placement.p

    @unittest.skip("demonstrating skipping")
    def test_too_many_subvertices(self):
        # More subvertices than the machine has processors - must raise
        subvertices = list()
        for i in range(100 * 17):
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))
        self.graph = PartitionableGraph("Graph", subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        with self.assertRaises(PacmanPlaceException):
            placements = self.bp.place(self.subgraph, self.graph_mapper)

    @unittest.skip("demonstrating skipping")
    def test_fill_machine(self):
        # Exactly fill the available processors - placement must succeed
        subvertices = list()
        for i in range(99 * 17):
            subvertices.append(PartitionedTestVertex(
                0, 50, get_resources_used_by_atoms(0, 50, []),
                "PartitionedVertex " + str(i)))
        self.graph = PartitionableGraph("Graph", subvertices)
        self.graph_mapper = GraphMapper()
        self.graph_mapper.add_subvertices(subvertices)
        self.bp = BasicPlacer(self.machine, self.graph)
        self.subgraph = PartitionedGraph(subvertices=subvertices)
        placements = self.bp.place(self.subgraph, self.graph_mapper)
def setUp(self):
    # ------------------------------------------------------------------
    # Setting up vertices, edges and graph
    # ------------------------------------------------------------------
    self.vert1 = TestVertex(100, "New AbstractConstrainedTestVertex 1")
    self.vert2 = TestVertex(5, "New AbstractConstrainedTestVertex 2")
    self.vert3 = TestVertex(3, "New AbstractConstrainedTestVertex 3")
    self.edge1 = MultiCastPartitionableEdge(self.vert1, self.vert2,
                                            "First edge")
    self.edge2 = MultiCastPartitionableEdge(self.vert2, self.vert1,
                                            "Second edge")
    self.edge3 = MultiCastPartitionableEdge(self.vert1, self.vert3,
                                            "Third edge")
    self.verts = [self.vert1, self.vert2, self.vert3]
    self.edges = [self.edge1, self.edge2, self.edge3]
    self.graph = PartitionableGraph("Graph", self.verts, self.edges)

    # ------------------------------------------------------------------
    # Setting up machine: a 10 x 10 torus of chips, 18 processors each
    # ------------------------------------------------------------------
    flops = 1000
    (e, ne, n, w, sw, s) = range(6)
    processors = list()
    for i in range(18):
        processors.append(Processor(i, flops))
    _sdram = SDRAM(128 * (2 ** 20))
    ip = "192.168.240.253"
    chips = list()
    for x in range(10):
        for y in range(10):
            links = list()
            links.append(Link(x, y, 0, (x + 1) % 10, y, n, n))
            links.append(Link(x, y, 1, (x + 1) % 10, (y + 1) % 10, s, s))
            links.append(Link(x, y, 2, x, (y + 1) % 10, n, n))
            links.append(Link(x, y, 3, (x - 1) % 10, y, s, s))
            links.append(Link(x, y, 4, (x - 1) % 10, (y - 1) % 10, n, n))
            links.append(Link(x, y, 5, x, (y - 1) % 10, s, s))
            r = Router(links, False, 100, 1024)
            chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))
    self.machine = Machine(chips)

    # ------------------------------------------------------------------
    # Setting up subgraph and graph_mapper
    # ------------------------------------------------------------------
    # NOTE(review): subvertex1 uses self.vert1.get_resources_used...; the
    # others call a bare get_resources_used_by_atoms - presumably a
    # module-level helper; verify the import
    self.subvertices = list()
    self.subvertex1 = PartitionedVertex(
        0, 1, self.vert1.get_resources_used_by_atoms(0, 1, []),
        "First subvertex")
    self.subvertex2 = PartitionedVertex(
        1, 5, get_resources_used_by_atoms(1, 5, []), "Second subvertex")
    self.subvertex3 = PartitionedVertex(
        5, 10, get_resources_used_by_atoms(5, 10, []), "Third subvertex")
    self.subvertex4 = PartitionedVertex(
        10, 100, get_resources_used_by_atoms(10, 100, []),
        "Fourth subvertex")
    self.subvertices.append(self.subvertex1)
    self.subvertices.append(self.subvertex2)
    self.subvertices.append(self.subvertex3)
    self.subvertices.append(self.subvertex4)
    self.subedges = list()
    self.subgraph = PartitionedGraph("Subgraph", self.subvertices,
                                     self.subedges)
    self.graph_mapper = GraphMapper()
    self.graph_mapper.add_subvertices(self.subvertices)
def __init__(self, n_neurons, machine_time_step, timescale_factor, port,
             label, board_address=None, virtual_key=None, check_key=True,
             prefix=None, prefix_type=None, tag=None, key_left_shift=0,
             sdp_port=1, buffer_space=0, notify_buffer_space=False,
             space_before_notification=640, notification_tag=None,
             notification_ip_address=None, notification_port=None,
             notification_strip_sdp=True, constraints=None):
    """ Create a reverse IP tag multicast source, which turns received\
        EIEIO packets into multicast packets on the machine.

    :param n_neurons: the number of atoms (at most _max_atoms_per_core)
    :param machine_time_step: the time step of the machine
    :param timescale_factor: the time scaling of the simulation
    :param port: the port to listen for incoming packets on
    :param label: the label of this vertex
    :param board_address: the board to receive packets on
    :param virtual_key: the base multicast key to use, if fixed
    :param check_key: True if received keys should be verified
    :param prefix: the prefix to "or" with generated keys
    :param prefix_type: where the prefix applies (upper or lower half)
    :param tag: the reverse IP tag to use
    :param key_left_shift: the shift to apply to keys (0 to 16)
    :param sdp_port: the SDP port to listen on
    :param buffer_space: the space reserved for buffering
    :param notify_buffer_space: True to notify the host of buffer space
    :param space_before_notification: free space threshold for notifying
    :param notification_tag: the IP tag for buffer notifications
    :param notification_ip_address: the host to notify of buffer space
    :param notification_port: the port to notify of buffer space
    :param notification_strip_sdp: True to strip SDP on notifications
    :param constraints: the constraints of this vertex
    :raise ConfigurationException: if the prefix, key, mask and shift\
        settings are inconsistent
    """
    if n_neurons > ReverseIpTagMultiCastSource._max_atoms_per_core:
        raise Exception("This model can currently only cope with {} atoms"
                        .format(ReverseIpTagMultiCastSource
                                ._max_atoms_per_core))

    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label,
        ReverseIpTagMultiCastSource._max_atoms_per_core, constraints)
    PartitionedVertex.__init__(
        self, label=label, resources_required=ResourceContainer(
            cpu=CPUCyclesPerTickResource(123), dtcm=DTCMResource(123),
            sdram=SDRAMResource(123)))
    self.add_constraint(TagAllocatorRequireReverseIptagConstraint(
        port, sdp_port, board_address, tag))
    if notify_buffer_space:
        self.add_constraint(TagAllocatorRequireIptagConstraint(
            notification_ip_address, notification_port,
            notification_strip_sdp, board_address, notification_tag))

    # set params
    self._port = port
    self._virtual_key = virtual_key
    self._prefix = prefix
    self._check_key = check_key
    self._prefix_type = prefix_type
    self._key_left_shift = key_left_shift
    self._buffer_space = buffer_space
    self._space_before_notification = space_before_notification
    self._notify_buffer_space = notify_buffer_space

    # validate params
    if self._prefix is not None and self._prefix_type is None:
        # Fixed: error message previously misspelled "declaire"
        raise ConfigurationException(
            "To use a prefix, you must declare which position to use the "
            "prefix in on the prefix_type parameter.")

    if virtual_key is not None:
        self._mask, max_key = self._calculate_mask(n_neurons)

        # key = (key ored prefix) and mask
        temp_virtual_key = virtual_key
        if self._prefix is not None:
            if self._prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
                temp_virtual_key |= self._prefix
            if self._prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
                temp_virtual_key |= (self._prefix << 16)
        else:
            self._prefix = self._generate_prefix(virtual_key, prefix_type)

        if temp_virtual_key is not None:

            # check that mask key combo = key
            masked_key = temp_virtual_key & self._mask
            if self._virtual_key != masked_key:
                raise ConfigurationException(
                    "The mask calculated from your number of neurons has "
                    "the potential to interfere with the key, please "
                    "reduce the number of neurons or reduce the virtual"
                    " key")

            # check that neuron mask does not interfere with key
            if self._virtual_key < 0:
                raise ConfigurationException(
                    "Virtual keys must be positive")
            if n_neurons > max_key:
                raise ConfigurationException(
                    "The mask calculated from your number of neurons has "
                    "the capability to interfere with the key due to its "
                    "size please reduce the number of neurons or reduce "
                    "the virtual key")

            if self._key_left_shift > 16 or self._key_left_shift < 0:
                raise ConfigurationException(
                    "the key left shift must be within a range of "
                    "0 and 16. Please change this param and try again")

    # add placement constraint - keep the source near the Ethernet chip
    placement_constraint = PlacerRadialPlacementFromChipConstraint(0, 0)
    self.add_constraint(placement_constraint)
def __init__(self, lo_atom, hi_atom, resources_required, label=None,
             constraints=None):
    """ Create a test partitioned vertex covering an atom range.

    :param lo_atom: the first atom covered by this subvertex
    :param hi_atom: the last atom covered by this subvertex
    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param constraints: the constraints of this subvertex
    """
    PartitionedVertex.__init__(self, lo_atom, hi_atom, resources_required,
                               label=label, constraints=constraints)

    # Model-specific limit on atoms per core
    self._model_based_max_atoms_per_core = 256
def __init__(self, resources_required, label, constraints=None):
    """ Create a delay extension subvertex that provides six additional\
        provenance data items from the machine.

    :param resources_required: the resources used by this subvertex
    :param label: the label of this subvertex
    :param constraints: the constraints of this subvertex
    """
    PartitionedVertex.__init__(
        self, resources_required, label, constraints=constraints)
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, self._DELAY_EXTENSION_REGIONS.PROVENANCE_REGION.value, 6)
def __init__(
        self, n_keys, resources_required, machine_time_step,
        timescale_factor, label, constraints=None,

        # General input and output parameters
        board_address=None,

        # Live input parameters
        receive_port=None,
        receive_sdp_port=(
            constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
        receive_tag=None,

        # Key parameters
        virtual_key=None, prefix=None,
        prefix_type=None, check_keys=False,

        # Send buffer parameters
        send_buffer_times=None,
        send_buffer_max_space=(
            constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
        send_buffer_space_before_notify=640,
        send_buffer_notification_ip_address=None,
        send_buffer_notification_port=None,
        send_buffer_notification_tag=None):
    """
    :param n_keys: The number of keys to be sent via this multicast source
    :param resources_required: The resources required by the vertex
    :param machine_time_step: The time step to be used on the machine
    :param timescale_factor: The time scaling to be used in the simulation
    :param label: The label of this vertex
    :param constraints: Any initial constraints to this vertex
    :param board_address: The IP address of the board on which to place\
        this vertex if receiving data, either buffered or live (by\
        default, any board is chosen)
    :param receive_port: The port on the board that will listen for\
        incoming event packets (default is to disable this feature;\
        set a value to enable it)
    :param receive_sdp_port: The SDP port to listen on for incoming event\
        packets (defaults to 1)
    :param receive_tag: The IP tag to use for receiving live events\
        (uses any by default)
    :param virtual_key: The base multicast key to send received events\
        with (assigned automatically by default)
    :param prefix: The prefix to "or" with generated multicast keys\
        (default is no prefix)
    :param prefix_type: Whether the prefix should apply to the upper or\
        lower half of the multicast keys (default is upper half)
    :param check_keys: True if the keys of received events should be\
        verified before sending (default False)
    :param send_buffer_times: An array of arrays of times at which keys\
        should be sent (one array for each key, default disabled)
    :param send_buffer_max_space: The maximum amount of space to use of\
        the SDRAM on the machine (default is 1MB)
    :param send_buffer_space_before_notify: The amount of space free in\
        the sending buffer before the machine will ask the host for\
        more data (default setting is optimised for most cases)
    :param send_buffer_notification_ip_address: The IP address of the host\
        that will send new buffers (must be specified if a send buffer\
        is specified)
    :param send_buffer_notification_port: The port that the host that will\
        send new buffers is listening on (must be specified if a\
        send buffer is specified)
    :param send_buffer_notification_tag: The IP tag to use to notify the\
        host about space in the buffer (default is to use any tag)
    """

    # Set up super types
    PartitionedVertex.__init__(
        self, resources_required, label, constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, self._REGIONS.PROVENANCE_REGION.value, 0)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    ReceiveBuffersToHostBasicImpl.__init__(self)

    # Set up for receiving live packets
    if receive_port is not None:
        self.add_constraint(TagAllocatorRequireReverseIptagConstraint(
            receive_port, receive_sdp_port, board_address, receive_tag))

    # Work out if buffers are being sent
    self._first_machine_time_step = 0
    self._send_buffer = None
    if send_buffer_times is None:
        self._send_buffer_times = None
        SendsBuffersFromHostPreBufferedImpl.__init__(
            self, None)
    else:
        self._send_buffer = BufferedSendingRegion(send_buffer_max_space)
        self._send_buffer_times = send_buffer_times
        self.add_constraint(TagAllocatorRequireIptagConstraint(
            send_buffer_notification_ip_address,
            send_buffer_notification_port, True, board_address,
            send_buffer_notification_tag))
        SendsBuffersFromHostPreBufferedImpl.__init__(
            self, {self._REGIONS.SEND_BUFFER.value: self._send_buffer})

    # buffered out parameters; notify threshold is clamped to the
    # maximum buffer space
    self._send_buffer_space_before_notify = send_buffer_space_before_notify
    self._send_buffer_notification_ip_address = \
        send_buffer_notification_ip_address
    self._send_buffer_notification_port = send_buffer_notification_port
    self._send_buffer_notification_tag = send_buffer_notification_tag
    if self._send_buffer_space_before_notify > send_buffer_max_space:
        self._send_buffer_space_before_notify = send_buffer_max_space

    # Set up for recording (if requested)
    self._record_buffer_size = 0
    self._buffer_size_before_receive = 0

    # set flag for checking if in injection mode
    self._in_injection_mode = receive_port is not None

    # Sort out the keys to be used
    self._n_keys = n_keys
    self._virtual_key = virtual_key
    self._mask = None
    self._prefix = prefix
    self._prefix_type = prefix_type
    self._check_keys = check_keys

    # Work out the prefix details
    if self._prefix is not None:
        if self._prefix_type is None:
            self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
        if self._prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
            self._prefix = prefix << 16

    # If the user has specified a virtual key
    if self._virtual_key is not None:

        # check that virtual key is valid
        if self._virtual_key < 0:
            raise ConfigurationException(
                "Virtual keys must be positive")

        # Get a mask and maximum number of keys for the number of keys
        # requested
        self._mask, max_key = self._calculate_mask(n_keys)

        # Check that the number of keys and the virtual key don't interfere
        if n_keys > max_key:
            raise ConfigurationException(
                "The mask calculated from the number of keys will "
                "not work with the virtual key specified")

        if self._prefix is not None:

            # Check that the prefix doesn't change the virtual key in the
            # masked area
            masked_key = (self._virtual_key | self._prefix) & self._mask
            if self._virtual_key != masked_key:
                raise ConfigurationException(
                    "The number of keys, virtual key and key prefix"
                    " settings don't work together")
        else:

            # If no prefix was generated, generate one
            self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
            self._prefix = self._virtual_key
def __init__(
        self, n_keys, resources_required, machine_time_step,
        timescale_factor, label, constraints=None,

        # General input and output parameters
        board_address=None,

        # Live input parameters
        receive_port=None,
        receive_sdp_port=(
            constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
        receive_tag=None,

        # Key parameters
        virtual_key=None, prefix=None,
        prefix_type=None, check_keys=False,

        # Send buffer parameters
        send_buffer_times=None,
        send_buffer_max_space=(
            constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
        send_buffer_space_before_notify=640,
        send_buffer_notification_ip_address=None,
        send_buffer_notification_port=None,
        send_buffer_notification_tag=None):
    """
    :param n_keys: The number of keys to be sent via this multicast source
    :param resources_required: The resources required by the vertex
    :param machine_time_step: The time step to be used on the machine
    :param timescale_factor: The time scaling to be used in the simulation
    :param label: The label of this vertex
    :param constraints: Any initial constraints to this vertex
    :param board_address: The IP address of the board on which to place\
        this vertex if receiving data, either buffered or live (by\
        default, any board is chosen)
    :param receive_port: The port on the board that will listen for\
        incoming event packets (default is to disable this feature;\
        set a value to enable it)
    :param receive_sdp_port: The SDP port to listen on for incoming event\
        packets (defaults to 1)
    :param receive_tag: The IP tag to use for receiving live events\
        (uses any by default)
    :param virtual_key: The base multicast key to send received events\
        with (assigned automatically by default)
    :param prefix: The prefix to "or" with generated multicast keys\
        (default is no prefix)
    :param prefix_type: Whether the prefix should apply to the upper or\
        lower half of the multicast keys (default is upper half)
    :param check_keys: True if the keys of received events should be\
        verified before sending (default False)
    :param send_buffer_times: An array of arrays of times at which keys\
        should be sent (one array for each key, default disabled)
    :param send_buffer_max_space: The maximum amount of space to use of\
        the SDRAM on the machine (default is 1MB)
    :param send_buffer_space_before_notify: The amount of space free in\
        the sending buffer before the machine will ask the host for\
        more data (default setting is optimised for most cases)
    :param send_buffer_notification_ip_address: The IP address of the host\
        that will send new buffers (must be specified if a send buffer\
        is specified)
    :param send_buffer_notification_port: The port that the host that will\
        send new buffers is listening on (must be specified if a\
        send buffer is specified)
    :param send_buffer_notification_tag: The IP tag to use to notify the\
        host about space in the buffer (default is to use any tag)
    """

    # Set up super types
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    PartitionedVertex.__init__(
        self, resources_required, label, constraints)
    AbstractProvidesOutgoingEdgeConstraints.__init__(self)
    ReceiveBuffersToHostBasicImpl.__init__(self)

    # Set up for receiving live packets
    if receive_port is not None:
        self.add_constraint(TagAllocatorRequireReverseIptagConstraint(
            receive_port, receive_sdp_port, board_address, receive_tag))

    # Work out if buffers are being sent
    self._first_machine_time_step = 0
    self._send_buffer = None
    if send_buffer_times is None:
        self._send_buffer_times = None
        SendsBuffersFromHostPartitionedVertexPreBufferedImpl.__init__(
            self, None)
    else:
        self._send_buffer = BufferedSendingRegion(send_buffer_max_space)
        self._send_buffer_times = send_buffer_times
        self.add_constraint(TagAllocatorRequireIptagConstraint(
            send_buffer_notification_ip_address,
            send_buffer_notification_port, True, board_address,
            send_buffer_notification_tag))
        SendsBuffersFromHostPartitionedVertexPreBufferedImpl.__init__(
            self, {self._REGIONS.SEND_BUFFER.value: self._send_buffer})

    # buffered out parameters; notify threshold is clamped to the
    # maximum buffer space
    self._send_buffer_space_before_notify = send_buffer_space_before_notify
    self._send_buffer_notification_ip_address = \
        send_buffer_notification_ip_address
    self._send_buffer_notification_port = send_buffer_notification_port
    self._send_buffer_notification_tag = send_buffer_notification_tag
    if self._send_buffer_space_before_notify > send_buffer_max_space:
        self._send_buffer_space_before_notify = send_buffer_max_space

    # Set up for recording (if requested)
    self._record_buffer_size = 0
    self._buffer_size_before_receive = 0

    # Sort out the keys to be used
    self._n_keys = n_keys
    self._virtual_key = virtual_key
    self._mask = None
    self._prefix = prefix
    self._prefix_type = prefix_type
    self._check_keys = check_keys

    # Work out the prefix details
    if self._prefix is not None:
        if self._prefix_type is None:
            self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
        if self._prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
            self._prefix = prefix << 16

    # If the user has specified a virtual key
    if self._virtual_key is not None:

        # check that virtual key is valid
        if self._virtual_key < 0:
            raise ConfigurationException(
                "Virtual keys must be positive")

        # Get a mask and maximum number of keys for the number of keys
        # requested
        self._mask, max_key = self._calculate_mask(n_keys)

        # Check that the number of keys and the virtual key don't interfere
        if n_keys > max_key:
            raise ConfigurationException(
                "The mask calculated from the number of keys will "
                "not work with the virtual key specified")

        if self._prefix is not None:

            # Check that the prefix doesn't change the virtual key in the
            # masked area
            masked_key = (self._virtual_key | self._prefix) & self._mask
            if self._virtual_key != masked_key:
                raise ConfigurationException(
                    "The number of keys, virtual key and key prefix"
                    " settings don't work together")
        else:

            # If no prefix was generated, generate one
            self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
            self._prefix = self._virtual_key
def __init__(
        self, machine_time_step, timescale_factor, ip_address, port,
        board_address=None, tag=None, strip_sdp=True, use_prefix=False,
        key_prefix=None, prefix_type=None,
        message_type=EIEIOType.KEY_32_BIT, right_shift=0,
        payload_as_time_stamps=True, use_payload_prefix=True,
        payload_prefix=None, payload_right_shift=0,
        number_of_packets_sent_per_time_step=0, constraints=None,
        label=None):
    """ Create a single-atom vertex that gathers live packets from the\
        machine and forwards them to the host over an IP tag.

    :param ip_address: IP address the gathered packets are sent to
    :param port: UDP port the gathered packets are sent to
    :param strip_sdp: True if the SDP header should be stripped from\
        forwarded packets
    :param message_type: the EIEIO message format used for the\
        forwarded data
    :param payload_as_time_stamps: True if payloads carry timestamps
    :param use_payload_prefix: True if a payload prefix is applied
    :param label: the name of the vertex (defaults to\
        "Live Packet Gatherer")
    :raise ConfigurationException: if the timestamp/payload settings\
        conflict, or if prefix_type is not an EIEIOPrefix
    """
    # A timestamp can travel either as the payload prefix or as the
    # per-key payload — a payload-carrying type with both enabled is
    # ambiguous.
    if (message_type in (
            EIEIOType.KEY_PAYLOAD_32_BIT, EIEIOType.KEY_PAYLOAD_16_BIT)
            and use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as "
            "payload to each key, not both")

    # Key-only types cannot carry a per-key payload, so timestamps must
    # come via the payload prefix; requesting timestamps without it is
    # inconsistent.
    if (message_type in (EIEIOType.KEY_32_BIT, EIEIOType.KEY_16_BIT)
            and not use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as"
            " payload to each key, but current configuration does not "
            "specify either of these")

    # prefix_type, when given, must be an EIEIOPrefix member.
    if (prefix_type is not None
            and not isinstance(prefix_type, EIEIOPrefix)):
        raise ConfigurationException(
            "the type of a prefix type should be of a EIEIOPrefix, "
            "which can be located in :"
            "spinnman.messages.eieio.eieio_prefix_type")

    if label is None:
        label = "Live Packet Gatherer"

    # Initialise the base types (order preserved from the original).
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_atoms=1, label=label, max_atoms_per_core=1,
        constraints=constraints)
    AbstractProvidesProvenanceData.__init__(self)
    PartitionedVertex.__init__(
        self, label=label, resources_required=ResourceContainer(
            cpu=CPUCyclesPerTickResource(
                self.get_cpu_usage_for_atoms(1, None)),
            dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(1, None)),
            sdram=SDRAMResource(
                self.get_sdram_usage_for_atoms(1, None))))

    # Try to place this near the Ethernet chip at (0, 0)
    self.add_constraint(PlacerRadialPlacementFromChipConstraint(0, 0))

    # Reserve the IP tag used to forward the gathered packets
    self.add_constraint(TagAllocatorRequireIptagConstraint(
        ip_address, port, strip_sdp, board_address, tag))

    # Remember the EIEIO formatting parameters for later use
    self._use_prefix = use_prefix
    self._key_prefix = key_prefix
    self._prefix_type = prefix_type
    self._message_type = message_type
    self._right_shift = right_shift
    self._payload_as_time_stamps = payload_as_time_stamps
    self._use_payload_prefix = use_payload_prefix
    self._payload_prefix = payload_prefix
    self._payload_right_shift = payload_right_shift
    self._number_of_packets_sent_per_time_step = \
        number_of_packets_sent_per_time_step
def __init__(self, send_buffers, resources_required, label, constraints):
    """ Partitioned vertex whose outgoing data is supplied as\
        pre-built host-side send buffers.

    :param send_buffers: the pre-built buffers handed to the\
        pre-buffered sending implementation
    :param resources_required: the resources needed by this subvertex
    :param label: the name of the subvertex
    :param constraints: the constraints of the subvertex
    """
    # Base partitioned vertex holds resources, label and constraints.
    PartitionedVertex.__init__(
        self, resources_required, label, constraints)
    # The pre-buffered implementation keeps the supplied buffers for
    # later streaming to the machine.
    SendsBuffersFromHostPartitionedVertexPreBufferedImpl.__init__(
        self, send_buffers)