def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        spinnaker_link_id, speed=30, sample_time=4096, update_time=512,
        delay_time=5, delta_threshold=23, continue_if_not_different=True,
        label="RobotMotorControl"):
    """ Create a munich motor control vertex.

    The device hardware has exactly 6 control channels, so the vertex
    always uses 6 atoms; any other requested n_neurons is ignored with
    a warning.

    :param spinnaker_link_id: the SpiNNaker link the motor device is\
        attached to
    :param speed, sample_time, update_time, delay_time, delta_threshold,\
        continue_if_not_different: motor control parameters forwarded to\
        the device binary
    """
    if n_neurons != 6:
        # logger.warn() is a deprecated alias in the logging module;
        # logger.warning() is the supported spelling.
        logger.warning(
            "The specified number of neurons for the munich motor"
            " device has been ignored; 6 will be used instead")
    AbstractDataSpecableVertex.__init__(self, machine_time_step,
                                        timescale_factor)
    AbstractPartitionableVertex.__init__(self, 6, label, 6, None)
    # The motor device itself is a dependent vertex reached over the
    # given SpiNNaker link
    AbstractVertexWithEdgeToDependentVertices.__init__(
        self, [_MunichMotorDevice(spinnaker_link_id)], None)
    AbstractProvidesOutgoingEdgeConstraints.__init__(self)

    # Store the motor control parameters for data generation
    self._speed = speed
    self._sample_time = sample_time
    self._update_time = update_time
    self._delay_time = delay_time
    self._delta_threshold = delta_threshold
    self._continue_if_not_different = continue_if_not_different
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
        duration=None, seed=None):
    """ Build a Poisson spike source vertex.

    Initialises all base classes, stores the rate/start/duration
    parameters, seeds the random number generator, and reads the
    host-side buffering settings from the "Buffers" config section.
    """
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_neurons, label=label, constraints=constraints,
        max_atoms_per_core=self._model_based_max_atoms_per_core)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    ReceiveBuffersToHostBasicImpl.__init__(self)
    AbstractProvidesOutgoingEdgeConstraints.__init__(self)
    PopulationSettableChangeRequiresMapping.__init__(self)

    # Poisson process parameters
    self._rate = rate
    self._start = start
    self._duration = duration
    self._rng = numpy.random.RandomState(seed)

    # Spike recording support and buffer sizing read from configuration
    self._spike_recorder = SpikeRecorder(machine_time_step)
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
def __init__(self, n_neurons, machine_time_step, timescale_factor,
             constraints=None, label="SpikeSourcePoisson", rate=1.0,
             start=0.0, duration=None, seed=None):
    """ Create a new SpikeSourcePoisson object.

    Stores the Poisson parameters, seeds the random generator and sets
    up spike recording plus the outgoing-edge key restrictor.
    """
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_neurons, label=label, constraints=constraints,
        max_atoms_per_core=self._model_based_max_atoms_per_core)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractSpikeRecordable.__init__(self)

    # Poisson process parameters
    self._rate = rate
    self._start = start
    self._duration = duration
    self._rng = numpy.random.RandomState(seed)

    # Spike recording support
    self._spike_recorder = SpikeRecorder(machine_time_step)

    # Keeps outgoing edges on contiguous key ranges
    self._outgoing_edge_key_restrictor = \
        OutgoingEdgeSameContiguousKeysRestrictor()
def __init__(self, n_neurons, delay_per_stage, source_vertex,
             machine_time_step, timescale_factor, constraints=None,
             label="DelayExtension"):
    """ Create a new DelayExtension object.

    The vertex adds extra delay stages for spikes coming from
    source_vertex, and must be partitioned the same way as that vertex.
    """
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, 256, constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractProvidesNKeysForPartition.__init__(self)

    self._source_vertex = source_vertex
    # Number of delay stages grows as delayed projections are added
    self._n_delay_stages = 0
    self._delay_per_stage = delay_per_stage

    # Dictionary of vertex_slice -> delay block for data specification
    self._delay_blocks = {}

    # Partitioning must mirror the source vertex exactly
    self.add_constraint(
        PartitionerSameSizeAsVertexConstraint(source_vertex))
def __init__(self, n_neurons, max_delay_per_neuron, source_vertex,
             machine_time_step, timescale_factor, constraints=None,
             label="DelayExtension"):
    """ Create a new DelayExtension object.

    Extends the synaptic delay range available to source_vertex; it is
    constrained to partition identically to that vertex.
    """
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_neurons, constraints=constraints, label=label,
        max_atoms_per_core=256)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractProvidesIncomingEdgeConstraints.__init__(self)
    AbstractOutgoingEdgeSameContiguousKeysRestrictor.__init__(self)

    self._max_delay_per_neuron = max_delay_per_neuron
    self._max_stages = 0
    self._source_vertex = source_vertex

    # Partitioning must mirror the source vertex exactly
    same_size_constraint = \
        PartitionerSameSizeAsVertexConstraint(source_vertex)
    self.add_constraint(same_size_constraint)
def __init__(self, n_neurons, machine_time_step, timescale_factor,
             spinnaker_link_id, speed=30, sample_time=4096,
             update_time=512, delay_time=5, delta_threshold=23,
             continue_if_not_different=True, label="RobotMotorControl"):
    """ Create a munich motor control vertex.

    The device hardware has exactly 6 control channels, so the vertex
    always uses 6 atoms; any other requested n_neurons is ignored with
    a warning.

    :param spinnaker_link_id: the SpiNNaker link the motor device is\
        attached to
    :param speed, sample_time, update_time, delay_time, delta_threshold,\
        continue_if_not_different: motor control parameters forwarded to\
        the device binary
    """
    if n_neurons != 6:
        # logger.warn() is a deprecated alias in the logging module;
        # logger.warning() is the supported spelling.
        logger.warning(
            "The specified number of neurons for the munich motor"
            " device has been ignored; 6 will be used instead")
    AbstractDataSpecableVertex.__init__(self, machine_time_step,
                                        timescale_factor)
    AbstractPartitionableVertex.__init__(self, 6, label, 6, None)
    # The motor device itself is a dependent vertex reached over the
    # given SpiNNaker link
    AbstractVertexWithEdgeToDependentVertices.__init__(
        self, [_MunichMotorDevice(spinnaker_link_id)], None)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)

    # Store the motor control parameters for data generation
    self._speed = speed
    self._sample_time = sample_time
    self._update_time = update_time
    self._delay_time = delay_time
    self._delta_threshold = delta_threshold
    self._continue_if_not_different = continue_if_not_different
def __init__(self, n_atoms, label, max_atoms_per_core, machine_time_step,
             timescale_factor, constraints=None):
    """ Initialise a data-specable, partitionable vertex with the given
        atom count, core limit and timing parameters.
    """
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_atoms, label, constraints=constraints,
        max_atoms_per_core=max_atoms_per_core)
def __init__(self, n_atoms, spinnaker_link_id, label, max_atoms_per_core):
    """ Create a virtual vertex attached to a SpiNNaker link.

    The virtual chip coordinates are unknown until allocation, so they
    start as None.
    """
    AbstractPartitionableVertex.__init__(self, n_atoms, label,
                                         max_atoms_per_core)
    # Virtual chip coordinates, filled in once the chip is allocated
    self._virtual_chip_x = None
    self._virtual_chip_y = None
    self._spinnaker_link_id = spinnaker_link_id
def __init__(self, n_neurons, spike_times, machine_time_step,
             spikes_per_second, ring_buffer_sigma, timescale_factor,
             port=None, tag=None, ip_address=None, board_address=None,
             max_on_chip_memory_usage_for_spikes_in_bytes=None,
             space_before_notification=640, constraints=None,
             label="SpikeSourceArray"):
    """ Create a spike source that replays a fixed array of spike times.

    Buffered sending is used, so an IP tag is reserved for host
    communication; host/port default to the "Buffers" config section.
    """
    # Fall back to configuration for the buffer host/port when not given
    if ip_address is None:
        ip_address = config.get("Buffers", "receive_buffer_host")
    if port is None:
        port = config.getint("Buffers", "receive_buffer_port")

    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_neurons, label=label,
        max_atoms_per_core=self._model_based_max_atoms_per_core,
        constraints=constraints)
    AbstractOutgoingEdgeSameContiguousKeysRestrictor.__init__(self)

    self._spike_times = spike_times
    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._space_before_notification = space_before_notification

    # Reserve the IP tag used to stream buffers from the host
    self.add_constraint(
        TagAllocatorRequireIptagConstraint(ip_address, port,
                                           strip_sdp=True,
                                           board_address=board_address,
                                           tag_id=tag))

    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = \
            front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

    # check the values do not conflict with chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise ConfigurationException(
            "The memory usage on chip is either beyond what is supportable"
            " on the spinnaker board being supported or you have requested"
            " a negative value for a memory usage. Please correct and"
            " try again")

    # Keep track of any previously generated buffers
    self._send_buffers = {}
def __init__(
        self, n_neurons, spike_times, machine_time_step, timescale_factor,
        port=None, tag=None, ip_address=None, board_address=None,
        max_on_chip_memory_usage_for_spikes_in_bytes=None,
        space_before_notification=640, constraints=None,
        label="SpikeSourceArray"):
    """ Create a spike source that replays a fixed array of spike times,
        with EIEIO-based spike recording support.

    Buffered sending is used, so an IP tag is reserved for host
    communication; host/port default to the "Buffers" config section.
    """
    # Fall back to configuration for the buffer host/port when not given
    if ip_address is None:
        ip_address = config.get("Buffers", "receive_buffer_host")
    if port is None:
        port = config.getint("Buffers", "receive_buffer_port")

    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_neurons, label=label,
        max_atoms_per_core=self._model_based_max_atoms_per_core,
        constraints=constraints)
    AbstractSpikeRecordable.__init__(self)

    self._spike_times = spike_times
    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._space_before_notification = space_before_notification

    # Reserve the IP tag used to stream buffers from the host
    self.add_constraint(TagAllocatorRequireIptagConstraint(
        ip_address, port, strip_sdp=True, board_address=board_address,
        tag_id=tag))

    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = \
            front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

    # check the values do not conflict with chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise ConfigurationException(
            "The memory usage on chip is either beyond what is supportable"
            " on the spinnaker board being supported or you have requested"
            " a negative value for a memory usage. Please correct and"
            " try again")

    # The notification threshold cannot exceed the buffer itself
    if (self._max_on_chip_memory_usage_for_spikes <
            self._space_before_notification):
        self._space_before_notification = \
            self._max_on_chip_memory_usage_for_spikes

    # Keep track of any previously generated buffers
    self._send_buffers = {}
    self._spike_recording_region_size = None

    # handle recording
    self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)

    # handle outgoing constraints
    self._outgoing_edge_key_restrictor = \
        OutgoingEdgeSameContiguousKeysRestrictor()
def __init__(self, n_atoms, spinnaker_link_id, label, max_atoms_per_core):
    """ Create a virtual vertex attached to a SpiNNaker link.

    Both the virtual chip coordinates and the real chip/link they map
    onto are unknown until allocation, so they start as None.
    """
    AbstractPartitionableVertex.__init__(self, n_atoms, label,
                                         max_atoms_per_core)
    # Virtual chip coordinates, filled in once the chip is allocated
    self._virtual_chip_x = None
    self._virtual_chip_y = None
    # Real chip/link the virtual chip is reached through
    self._real_chip_x = None
    self._real_chip_y = None
    self._real_link = None
    self._spinnaker_link_id = spinnaker_link_id
def __init__(self, machine_time_step, timescale_factor):
    """ Create a command sender vertex (a single-atom vertex that emits
        timed multicast commands).
    """
    AbstractProvidesOutgoingEdgeConstraints.__init__(self)
    AbstractPartitionableVertex.__init__(self, 1, "Command Sender", 1)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)

    # Key constraints per outgoing edge, and the edge for each command
    self._edge_constraints = {}
    self._command_edge = {}
    # Schedule of commands, split by whether they carry a payload
    self._times_with_commands = set()
    self._commands_with_payloads = {}
    self._commands_without_payloads = {}
def __init__(self, machine_time_step, timescale_factor):
    """ Create a command sender vertex (a single-atom vertex that emits
        timed multicast commands), providing outgoing partition
        constraints.
    """
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractPartitionableVertex.__init__(self, 1, "Command Sender", 1)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)

    # Key constraints per outgoing edge, and the edge for each command
    self._edge_constraints = {}
    self._command_edge = {}
    # Schedule of commands, split by whether they carry a payload
    self._times_with_commands = set()
    self._commands_with_payloads = {}
    self._commands_without_payloads = {}
def __init__(self, n_atoms, virtual_chip_x, virtual_chip_y,
             spinnaker_link_id, label, max_atoms_per_core):
    """ Create a virtual vertex pinned to the given virtual chip
        coordinates.
    """
    AbstractPartitionableVertex.__init__(self, n_atoms, label,
                                         max_atoms_per_core)
    # Virtual chip coordinates and the SpiNNaker link the chip maps onto
    self._virtual_chip_x = virtual_chip_x
    self._virtual_chip_y = virtual_chip_y
    self._spinnaker_link_id = spinnaker_link_id

    # Pin placement to the virtual chip
    self.add_constraint(PlacerChipAndCoreConstraint(
        self._virtual_chip_x, self._virtual_chip_y))
def __init__(
        self, n_neurons, spike_times, machine_time_step,
        spikes_per_second, ring_buffer_sigma, timescale_factor, port=None,
        tag=None, ip_address=None, board_address=None,
        max_on_chip_memory_usage_for_spikes_in_bytes=None,
        constraints=None, label="SpikeSourceArray"):
    """ Create a spike source that replays a fixed array of spike times.

    Buffered sending is used, so an IP tag is reserved for host
    communication; host/port default to the "Buffers" config section.
    """
    # Fall back to configuration for the buffer host/port when not given
    if ip_address is None:
        ip_address = config.get("Buffers", "receive_buffer_host")
    if port is None:
        port = config.getint("Buffers", "receive_buffer_port")

    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_neurons, label=label,
        max_atoms_per_core=self._model_based_max_atoms_per_core,
        constraints=constraints)
    AbstractOutgoingEdgeSameContiguousKeysRestrictor.__init__(self)

    self._spike_times = spike_times
    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._threshold_for_reporting_bytes_written = 0

    # Reserve the IP tag used to stream buffers from the host
    self.add_constraint(TagAllocatorRequireIptagConstraint(
        ip_address, port, strip_sdp=True, board_address=board_address,
        tag_id=tag))

    # Default to an 8MB on-chip buffer when no limit was given
    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = 8 * 1024 * 1024

    # check the values do not conflict with chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise ConfigurationException(
            "The memory usage on chip is either beyond what is supportable"
            " on the spinnaker board being supported or you have requested"
            " a negative value for a memory usage. Please correct and"
            " try again")

    # Keep track of any previously generated buffers
    self._send_buffers = {}
def __init__(self, n_neurons, max_delay_per_neuron, source_vertex,
             machine_time_step, timescale_factor, constraints=None,
             label="DelayExtension"):
    """ Create a new DelayExtension object.

    Extends the synaptic delay range available to source_vertex; it is
    constrained to partition identically to that vertex.
    """
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_neurons, constraints=constraints, label=label,
        max_atoms_per_core=256)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractProvidesIncomingEdgeConstraints.__init__(self)
    AbstractProvidesNKeysForEdge.__init__(self)

    self._max_delay_per_neuron = max_delay_per_neuron
    self._max_stages = 0
    self._source_vertex = source_vertex

    # Partitioning must mirror the source vertex exactly
    same_size_constraint = \
        PartitionerSameSizeAsVertexConstraint(source_vertex)
    self.add_constraint(same_size_constraint)
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
        duration=None, seed=None):
    """ Build a Poisson spike source vertex with per-neuron parameters.

    rate/start/duration are expanded to per-neuron numpy arrays; the
    buffered recording settings come from the "Buffers" config section.
    """
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, self._model_based_max_atoms_per_core,
        constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    PopulationSettableChangeRequiresMapping.__init__(self)

    # Expand each parameter to one value per neuron
    self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
    self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
    self._duration = utility_calls.convert_param_to_numpy(
        duration, n_neurons)
    self._rng = numpy.random.RandomState(seed)

    # Spike recording support (multiple spikes per tick possible)
    self._spike_recorder = MultiSpikeRecorder(machine_time_step)

    # Host-side buffering configuration
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._enable_buffered_recording = config.getboolean(
        "Buffers", "enable_buffered_recording")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = config.getint(
        "Buffers", "receive_buffer_port")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
        duration=None, seed=None):
    """ Build a Poisson spike source vertex.

    Stores the rate/start/duration parameters as given, seeds the
    random generator, and reads the buffered recording settings from
    the "Buffers" config section.
    """
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, self._model_based_max_atoms_per_core,
        constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    PopulationSettableChangeRequiresMapping.__init__(self)

    # Poisson process parameters
    self._rate = rate
    self._start = start
    self._duration = duration
    self._rng = numpy.random.RandomState(seed)

    # Spike recording support
    self._spike_recorder = SpikeRecorder(machine_time_step)

    # Host-side buffering configuration
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._enable_buffered_recording = config.getboolean(
        "Buffers", "enable_buffered_recording")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = config.getint(
        "Buffers", "receive_buffer_port")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")
def __init__(self, n_neurons, delay_per_stage, source_vertex,
             machine_time_step, timescale_factor, constraints=None,
             label="DelayExtension"):
    """ Create a new DelayExtension object.

    The vertex adds extra delay stages for spikes coming from
    source_vertex, and must be partitioned the same way as that vertex.
    """
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, 256, constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractProvidesNKeysForPartition.__init__(self)

    self._source_vertex = source_vertex
    # Number of delay stages grows as delayed projections are added
    self._n_delay_stages = 0
    self._delay_per_stage = delay_per_stage

    # Dictionary of vertex_slice -> delay block for data specification
    self._delay_blocks = {}

    # Partitioning must mirror the source vertex exactly
    self.add_constraint(
        PartitionerSameSizeAsVertexConstraint(source_vertex))
def __init__(self, n_neurons, machine_time_step, timescale_factor, port,
             label, board_address=None, virtual_key=None, check_key=True,
             prefix=None, prefix_type=None, tag=None, key_left_shift=0,
             sdp_port=1, buffer_space=0, notify_buffer_space=False,
             space_before_notification=640, notification_tag=None,
             notification_ip_address=None, notification_port=None,
             notification_strip_sdp=True, constraints=None):
    """ Create a reverse-IP-tag multicast source vertex.

    Packets received on the given UDP port are injected into the
    machine as multicast packets, optionally under a fixed virtual key
    and prefix.  NOTE(review): the nesting of the key/prefix validation
    below was reconstructed from flattened source — confirm against the
    original repository before relying on the exact branch structure.
    """
    # The injector binary only supports a bounded number of atoms
    if n_neurons > ReverseIpTagMultiCastSource._max_atoms_per_core:
        raise Exception("This model can currently only cope with {} atoms"
                        .format(ReverseIpTagMultiCastSource
                                ._max_atoms_per_core))
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label,
        ReverseIpTagMultiCastSource._max_atoms_per_core, constraints)
    # Resource figures here are placeholders (123 each)
    PartitionedVertex.__init__(
        self, label=label, resources_required=ResourceContainer(
            cpu=CPUCyclesPerTickResource(123),
            dtcm=DTCMResource(123), sdram=SDRAMResource(123)))

    # Reserve the reverse IP tag that delivers incoming packets
    self.add_constraint(TagAllocatorRequireReverseIptagConstraint(
        port, sdp_port, board_address, tag))
    # Optionally reserve a forward tag to notify the host of free space
    if notify_buffer_space:
        self.add_constraint(TagAllocatorRequireIptagConstraint(
            notification_ip_address, notification_port,
            notification_strip_sdp, board_address, notification_tag))

    # set params
    self._port = port
    self._virtual_key = virtual_key
    self._prefix = prefix
    self._check_key = check_key
    self._prefix_type = prefix_type
    self._key_left_shift = key_left_shift
    self._buffer_space = buffer_space
    self._space_before_notification = space_before_notification
    self._notify_buffer_space = notify_buffer_space

    # validate params: a prefix is meaningless without a prefix position
    if self._prefix is not None and self._prefix_type is None:
        raise ConfigurationException(
            "To use a prefix, you must declaire which position to use the "
            "prefix in on the prefix_type parameter.")

    if virtual_key is not None:
        self._mask, max_key = self._calculate_mask(n_neurons)

        # key = (key | prefix) & mask
        temp_vertual_key = virtual_key
        if self._prefix is not None:
            if self._prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
                temp_vertual_key |= self._prefix
            if self._prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
                temp_vertual_key |= (self._prefix << 16)
        else:
            # No prefix given: derive one from the virtual key
            self._prefix = self._generate_prefix(virtual_key, prefix_type)

        if temp_vertual_key is not None:

            # check that mask key combo = key
            masked_key = temp_vertual_key & self._mask
            if self._virtual_key != masked_key:
                raise ConfigurationException(
                    "The mask calculated from your number of neurons has "
                    "the potential to interfere with the key, please "
                    "reduce the number of neurons or reduce the virtual"
                    " key")

            # check that neuron mask does not interfere with key
            if self._virtual_key < 0:
                raise ConfigurationException(
                    "Virtual keys must be positive")
            if n_neurons > max_key:
                raise ConfigurationException(
                    "The mask calculated from your number of neurons has "
                    "the capability to interfere with the key due to its "
                    "size please reduce the number of neurons or reduce "
                    "the virtual key")

        if self._key_left_shift > 16 or self._key_left_shift < 0:
            raise ConfigurationException(
                "the key left shift must be within a range of "
                "0 and 16. Please change this param and try again")

    # add placement constraint: keep the injector near the Ethernet chip
    placement_constraint = PlacerRadialPlacementFromChipConstraint(0, 0)
    self.add_constraint(placement_constraint)
def __init__(self, n_atoms, label, max_atoms_per_core=256):
    """ Initialise a partitionable vertex, remembering the model-level
        per-core atom limit so it can be queried later.
    """
    AbstractPartitionableVertex.__init__(
        self, n_atoms=n_atoms, max_atoms_per_core=max_atoms_per_core,
        label=label)
    # Keep the model's own limit separately from the base-class copy
    self._model_based_max_atoms_per_core = max_atoms_per_core
def __init__(self, machine_time_step, timescale_factor, ip_address, port,
             board_address=None, tag=None, strip_sdp=True,
             use_prefix=False, key_prefix=None, prefix_type=None,
             message_type=EIEIOType.KEY_32_BIT, right_shift=0,
             payload_as_time_stamps=True, use_payload_prefix=True,
             payload_prefix=None, payload_right_shift=0,
             number_of_packets_sent_per_time_step=0, constraints=None,
             label=None):
    """ Create a live packet gatherer vertex.

    A single-atom vertex that collects multicast packets and streams
    them to the host as EIEIO messages over the given IP tag.  The
    message/prefix parameters are validated for mutually exclusive
    ways of carrying timestamps before anything is initialised.
    """
    # Timestamps may be carried either once per message (payload
    # prefix) or once per key (payload) — not both at the same time
    if ((message_type == EIEIOType.KEY_PAYLOAD_32_BIT or
            message_type == EIEIOType.KEY_PAYLOAD_16_BIT) and
            use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as "
            "payload to each key, not both")
    # Key-only messages with timestamps need the payload prefix enabled
    if ((message_type == EIEIOType.KEY_32_BIT or
            message_type == EIEIOType.KEY_16_BIT) and
            not use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as"
            " payload to each key, but current configuration does not "
            "specify either of these")
    # prefix_type, when given, must be an EIEIOPrefix enum value
    if (not isinstance(prefix_type, EIEIOPrefix) and
            prefix_type is not None):
        raise ConfigurationException(
            "the type of a prefix type should be of a EIEIOPrefix, "
            "which can be located in :"
            "spinnman.messages.eieio.eieio_prefix_type")
    if label is None:
        label = "Live Packet Gatherer"
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractPartitionableVertex.__init__(self, n_atoms=1, label=label,
                                         max_atoms_per_core=1,
                                         constraints=constraints)
    AbstractProvidesProvenanceData.__init__(self)
    PartitionedVertex.__init__(
        self, label=label, resources_required=ResourceContainer(
            cpu=CPUCyclesPerTickResource(
                self.get_cpu_usage_for_atoms(1, None)),
            dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(1, None)),
            sdram=SDRAMResource(self.get_sdram_usage_for_atoms(1, None))))

    # Try to place this near the Ethernet
    self.add_constraint(PlacerRadialPlacementFromChipConstraint(0, 0))

    # Add the IP Tag requirement
    self.add_constraint(TagAllocatorRequireIptagConstraint(
        ip_address, port, strip_sdp, board_address, tag))

    # Store the message-formatting parameters for data generation
    self._prefix_type = prefix_type
    self._use_prefix = use_prefix
    self._key_prefix = key_prefix
    self._message_type = message_type
    self._right_shift = right_shift
    self._payload_as_time_stamps = payload_as_time_stamps
    self._use_payload_prefix = use_payload_prefix
    self._payload_prefix = payload_prefix
    self._payload_right_shift = payload_right_shift
    self._number_of_packets_sent_per_time_step = \
        number_of_packets_sent_per_time_step
def __init__(self, n_atoms, label):
    """ Initialise a partitionable vertex with a fixed limit of 256
        atoms per core.
    """
    AbstractPartitionableVertex.__init__(
        self, label=label, n_atoms=n_atoms, max_atoms_per_core=256)
def __init__(
        self, n_keys, machine_time_step, timescale_factor, label=None,
        constraints=None, max_atoms_per_core=sys.maxint,

        # General parameters
        board_address=None,

        # Live input parameters
        receive_port=None,
        receive_sdp_port=(
            constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
        receive_tag=None,

        # Key parameters
        virtual_key=None, prefix=None, prefix_type=None, check_keys=False,

        # Send buffer parameters
        send_buffer_times=None,
        send_buffer_max_space=(
            constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
        send_buffer_space_before_notify=640,
        send_buffer_notification_ip_address=None,
        send_buffer_notification_port=None,
        send_buffer_notification_tag=None):
    """
    :param n_keys: The number of keys to be sent via this multicast source
    :param machine_time_step: The time step to be used on the machine
    :param timescale_factor: The time scaling to be used in the simulation
    :param label: The label of this vertex
    :param constraints: Any initial constraints to this vertex
    :param board_address: The IP address of the board on which to place\
        this vertex if receiving data, either buffered or live (by\
        default, any board is chosen)
    :param receive_port: The port on the board that will listen for\
        incoming event packets (default is to disable this feature;\
        set a value to enable it)
    :param receive_sdp_port: The SDP port to listen on for incoming event\
        packets (defaults to the input buffering SDP port)
    :param receive_tag: The IP tag to use for receiving live events\
        (uses any by default)
    :param virtual_key: The base multicast key to send received events\
        with (assigned automatically by default)
    :param prefix: The prefix to "or" with generated multicast keys\
        (default is no prefix)
    :param prefix_type: Whether the prefix should apply to the upper or\
        lower half of the multicast keys (default is upper half)
    :param check_keys: True if the keys of received events should be\
        verified before sending (default False)
    :param send_buffer_times: An array of arrays of times at which keys\
        should be sent (one array for each key, default disabled)
    :param send_buffer_max_space: The maximum amount of space to use of\
        the SDRAM on the machine (default is 1MB)
    :param send_buffer_space_before_notify: The amount of space free in\
        the sending buffer before the machine will ask the host for\
        more data (default setting is optimised for most cases)
    :param send_buffer_notification_ip_address: The IP address of the host\
        that will send new buffers (must be specified if a send buffer\
        is specified)
    :param send_buffer_notification_port: The port that the host that will\
        send new buffers is listening on (must be specified if a\
        send buffer is specified)
    :param send_buffer_notification_tag: The IP tag to use to notify the\
        host about space in the buffer (default is to use any tag)
    """
    # NOTE: sys.maxint makes this Python-2-only code
    AbstractDataSpecableVertex.__init__(self, machine_time_step,
                                        timescale_factor)
    AbstractPartitionableVertex.__init__(self, n_keys, label,
                                         max_atoms_per_core, constraints)

    # Store the parameters
    self._board_address = board_address
    self._receive_port = receive_port
    self._receive_sdp_port = receive_sdp_port
    self._receive_tag = receive_tag
    self._virtual_key = virtual_key
    self._prefix = prefix
    self._prefix_type = prefix_type
    self._check_keys = check_keys
    self._send_buffer_times = send_buffer_times
    self._send_buffer_max_space = send_buffer_max_space
    self._send_buffer_space_before_notify = send_buffer_space_before_notify
    self._send_buffer_notification_ip_address = \
        send_buffer_notification_ip_address
    self._send_buffer_notification_port = send_buffer_notification_port
    self._send_buffer_notification_tag = send_buffer_notification_tag

    # Store recording parameters for later (recording starts disabled)
    self._recording_enabled = False
    self._record_buffering_ip_address = None
    self._record_buffering_port = None
    self._record_buffering_board_address = None
    self._record_buffering_tag = None
    self._record_buffer_size = 0
    self._record_buffer_size_before_receive = 0

    # Keep the subvertices for resuming runs
    self._subvertices = list()
    self._first_machine_time_step = 0
def __init__(self, n_neurons, binary, label, max_atoms_per_core,
             machine_time_step, timescale_factor, spikes_per_second,
             ring_buffer_sigma, incoming_spike_buffer_size, model_name,
             neuron_model, input_type, synapse_type, threshold_type,
             additional_input=None, constraints=None):
    """ Create a population vertex built from the given neuron-model
        components.

    :param binary: the aplx binary that implements this model
    :param neuron_model, input_type, synapse_type, threshold_type,\
        additional_input: the component objects that together define\
        the neuron behaviour
    :param incoming_spike_buffer_size: size of the on-core input spike\
        buffer; read from the "Simulation" config section when None
    """
    AbstractPartitionableVertex.__init__(self, n_neurons, label,
                                         max_atoms_per_core, constraints)
    AbstractDataSpecableVertex.__init__(self, machine_time_step,
                                        timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    AbstractVRecordable.__init__(self)
    AbstractGSynRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractProvidesIncomingPartitionConstraints.__init__(self)
    AbstractPopulationInitializable.__init__(self)
    AbstractPopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)

    self._binary = binary
    self._label = label
    self._machine_time_step = machine_time_step
    self._timescale_factor = timescale_factor
    # Fall back to the configured default buffer size when not given
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
    self._model_name = model_name
    self._neuron_model = neuron_model
    self._input_type = input_type
    self._threshold_type = threshold_type
    self._additional_input = additional_input

    # Set up for recording
    self._spike_recorder = SpikeRecorder(machine_time_step)
    self._v_recorder = VRecorder(machine_time_step)
    self._gsyn_recorder = GsynRecorder(machine_time_step)
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._v_buffer_max_size = config.getint(
        "Buffers", "v_buffer_size")
    self._gsyn_buffer_max_size = config.getint(
        "Buffers", "gsyn_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = config.getint(
        "Buffers", "receive_buffer_port")
    self._enable_buffered_recording = config.getboolean(
        "Buffers", "enable_buffered_recording")

    # Set up synapse handling
    self._synapse_manager = SynapticManager(
        synapse_type, machine_time_step, ring_buffer_sigma,
        spikes_per_second)

    # bool for if state has changed; True forces an initial mapping
    self._change_requires_mapping = True
def __init__(self, n_neurons, binary, label, max_atoms_per_core,
             machine_time_step, timescale_factor, spikes_per_second,
             ring_buffer_sigma, model_name, neuron_model, input_type,
             synapse_type, threshold_type, additional_input=None,
             constraints=None):
    """ Create a vertex modelling a population of n_neurons neurons.

    :param n_neurons: number of neurons (atoms) in the population
    :param binary: name of the binary to run for this vertex
    :param label: label of the vertex
    :param max_atoms_per_core: maximum atoms placed on a single core
    :param machine_time_step: machine time step of the simulation
    :param timescale_factor: time scaling of the simulation
    :param spikes_per_second: expected spike rate, for ring buffer scaling
    :param ring_buffer_sigma: ring buffer scaling sigma
    :param model_name: name of the neuron model
    :param neuron_model: the neuron model component
    :param input_type: the input type component
    :param synapse_type: the synapse type component
    :param threshold_type: the threshold type component
    :param additional_input: optional additional input component
    :param constraints: any initial constraints on the vertex
    """
    # Base-class initialisation (call order preserved deliberately)
    ReceiveBuffersToHostBasicImpl.__init__(self)
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, max_atoms_per_core, constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    AbstractVRecordable.__init__(self)
    AbstractGSynRecordable.__init__(self)
    AbstractProvidesOutgoingEdgeConstraints.__init__(self)
    AbstractProvidesIncomingEdgeConstraints.__init__(self)
    AbstractPopulationInitializable.__init__(self)
    AbstractPopulationSettable.__init__(self)
    AbstractMappable.__init__(self)

    # Basic parameters
    self._binary = binary
    self._label = label
    self._machine_time_step = machine_time_step
    self._timescale_factor = timescale_factor

    # Neuron model components
    self._model_name = model_name
    self._neuron_model = neuron_model
    self._input_type = input_type
    self._threshold_type = threshold_type
    self._additional_input = additional_input

    # Set up for recording
    self._spike_recorder = SpikeRecorder(machine_time_step)
    self._v_recorder = VRecorder(machine_time_step)
    self._gsyn_recorder = GsynRecorder(machine_time_step)

    # Buffer sizing settings from the "Buffers" configuration section
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._v_buffer_max_size = config.getint(
        "Buffers", "v_buffer_size")
    self._gsyn_buffer_max_size = config.getint(
        "Buffers", "gsyn_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")

    # Set up synapse handling
    self._synapse_manager = SynapticManager(
        synapse_type, machine_time_step, ring_buffer_sigma,
        spikes_per_second)

    # Get buffering information for later use
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = config.getint(
        "Buffers", "receive_buffer_port")
    self._enable_buffered_recording = config.getboolean(
        "Buffers", "enable_buffered_recording")

    # Flag indicating that the state has changed and remapping is needed
    self._change_requires_mapping = True
def __init__(
        self, n_keys, machine_time_step, timescale_factor, label=None,
        constraints=None, max_atoms_per_core=sys.maxint,

        # General parameters
        board_address=None,

        # Live input parameters
        receive_port=None,
        receive_sdp_port=(
            constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
        receive_tag=None,

        # Key parameters
        virtual_key=None, prefix=None, prefix_type=None, check_keys=False,

        # Send buffer parameters
        send_buffer_times=None,
        send_buffer_max_space=(
            constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
        send_buffer_space_before_notify=640,
        send_buffer_notification_ip_address=None,
        send_buffer_notification_port=None,
        send_buffer_notification_tag=None):
    """ Create a source of multicast packets driven by received events\
        and/or a pre-loaded send buffer.

    :param n_keys: The number of keys to be sent via this multicast source
    :param machine_time_step: The time step to be used on the machine
    :param timescale_factor: The time scaling to be used in the simulation
    :param label: The label of this vertex
    :param constraints: Any initial constraints to this vertex
    :param board_address: The IP address of the board on which to place\
        this vertex if receiving data, either buffered or live (by\
        default, any board is chosen)
    :param receive_port: The port on the board that will listen for\
        incoming event packets (default is to disable this feature; set a\
        value to enable it)
    :param receive_sdp_port: The SDP port to listen on for incoming event\
        packets (defaults to 1)
    :param receive_tag: The IP tag to use for receiving live events (uses\
        any by default)
    :param virtual_key: The base multicast key to send received events\
        with (assigned automatically by default)
    :param prefix: The prefix to "or" with generated multicast keys\
        (default is no prefix)
    :param prefix_type: Whether the prefix should apply to the upper or\
        lower half of the multicast keys (default is upper half)
    :param check_keys: True if the keys of received events should be\
        verified before sending (default False)
    :param send_buffer_times: An array of arrays of times at which keys\
        should be sent (one array for each key, default disabled)
    :param send_buffer_max_space: The maximum amount of space to use of\
        the SDRAM on the machine (default is 1MB)
    :param send_buffer_space_before_notify: The amount of space free in\
        the sending buffer before the machine will ask the host for more\
        data (default setting is optimised for most cases)
    :param send_buffer_notification_ip_address: The IP address of the\
        host that will send new buffers (must be specified if a send\
        buffer is specified)
    :param send_buffer_notification_port: The port that the host that\
        will send new buffers is listening on (must be specified if a\
        send buffer is specified)
    :param send_buffer_notification_tag: The IP tag to use to notify the\
        host about space in the buffer (default is to use any tag)
    """
    # Base-class initialisation
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_keys, label, max_atoms_per_core, constraints)

    # General / live-input parameters
    self._board_address = board_address
    self._receive_port = receive_port
    self._receive_sdp_port = receive_sdp_port
    self._receive_tag = receive_tag

    # Key parameters
    self._virtual_key = virtual_key
    self._prefix = prefix
    self._prefix_type = prefix_type
    self._check_keys = check_keys

    # Send buffer parameters
    self._send_buffer_times = send_buffer_times
    self._send_buffer_max_space = send_buffer_max_space
    self._send_buffer_space_before_notify = send_buffer_space_before_notify
    self._send_buffer_notification_ip_address = \
        send_buffer_notification_ip_address
    self._send_buffer_notification_port = send_buffer_notification_port
    self._send_buffer_notification_tag = send_buffer_notification_tag

    # Recording is off until explicitly enabled; parameters stored later
    self._recording_enabled = False
    self._record_buffering_ip_address = None
    self._record_buffering_port = None
    self._record_buffering_board_address = None
    self._record_buffering_tag = None
    self._record_buffer_size = 0
    self._record_buffer_size_before_receive = 0
    self._minimum_sdram_for_buffering = 0
    self._using_auto_pause_and_resume = False

    # Keep the subvertices for resuming runs
    self._subvertices = list()
    self._first_machine_time_step = 0
def __init__(self, machine_time_step, timescale_factor, ip_address, port,
             board_address=None, tag=None, strip_sdp=True, use_prefix=False,
             key_prefix=None, prefix_type=None,
             message_type=EIEIOType.KEY_32_BIT, right_shift=0,
             payload_as_time_stamps=True, use_payload_prefix=True,
             payload_prefix=None, payload_right_shift=0,
             number_of_packets_sent_per_time_step=0, constraints=None,
             label=None):
    """ Create a gatherer that forwards live event packets to the host.

    :param machine_time_step: The time step to be used on the machine
    :param timescale_factor: The time scaling of the simulation
    :param ip_address: The host address packets are sent to
    :param port: The host port packets are sent to
    :param board_address: Optional board to place this vertex on
    :param tag: Optional IP tag to use
    :param strip_sdp: Whether to strip the SDP headers from packets
    :param use_prefix: Whether a key prefix is applied
    :param key_prefix: The key prefix to apply
    :param prefix_type: An EIEIOPrefix, or None
    :param message_type: The EIEIOType of the messages to send
    :param right_shift: The amount to right-shift keys
    :param payload_as_time_stamps: Whether the payload carries timestamps
    :param use_payload_prefix: Whether a payload prefix is used
    :param payload_prefix: The payload prefix
    :param payload_right_shift: The amount to right-shift payloads
    :param number_of_packets_sent_per_time_step: Expected packet rate
    :param constraints: Any initial constraints to this vertex
    :param label: The label of this vertex
    :raise ConfigurationException: if the timestamp / prefix settings are\
        inconsistent, or prefix_type is not an EIEIOPrefix
    """
    # A timestamp can travel either as the payload prefix or as the
    # per-key payload, never as both
    if (message_type in (EIEIOType.KEY_PAYLOAD_32_BIT,
                         EIEIOType.KEY_PAYLOAD_16_BIT)
            and use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as "
            "payload to each key, not both")

    # ... and with a key-only message type, timestamps need the prefix
    if (message_type in (EIEIOType.KEY_32_BIT, EIEIOType.KEY_16_BIT)
            and not use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as"
            " payload to each key, but current configuration does not "
            "specify either of these")

    if (prefix_type is not None
            and not isinstance(prefix_type, EIEIOPrefix)):
        raise ConfigurationException(
            "the type of a prefix type should be of a EIEIOPrefix, "
            "which can be located in :"
            "SpinnMan.messages.eieio.eieio_prefix_type")

    if label is None:
        label = "Live Packet Gatherer"

    # Base-class initialisation
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractPartitionableVertex.__init__(
        self, n_atoms=1, label=label, max_atoms_per_core=1,
        constraints=constraints)

    # add constraints the partitioned vertex decides it needs
    extra_constraints = LivePacketGatherPartitionedVertex.get_constraints(
        ip_address, port, strip_sdp, board_address, tag)
    for extra_constraint in extra_constraints:
        self.add_constraint(extra_constraint)

    # Message formatting parameters
    self._prefix_type = prefix_type
    self._use_prefix = use_prefix
    self._key_prefix = key_prefix
    self._message_type = message_type
    self._right_shift = right_shift
    self._payload_as_time_stamps = payload_as_time_stamps
    self._use_payload_prefix = use_payload_prefix
    self._payload_prefix = payload_prefix
    self._payload_right_shift = payload_right_shift
    self._number_of_packets_sent_per_time_step = \
        number_of_packets_sent_per_time_step