def __init__(
        self, hostname, port, board_address=None, tag=None,
        strip_sdp=True, use_prefix=False, key_prefix=None,
        prefix_type=None, message_type=EIEIOType.KEY_32_BIT,
        right_shift=0, payload_as_time_stamps=True,
        use_payload_prefix=True, payload_prefix=None,
        payload_right_shift=0, number_of_packets_sent_per_time_step=0,
        constraints=None, label=None):
    """ Create a Live Packet Gatherer vertex, storing the EIEIO\
        configuration describing how gathered packets are to be sent to\
        an external host.

    :param hostname: host to send the gathered packets to
    :param port: port on that host
    :param board_address: IP of the board to use (any board by default)
    :param tag: IP tag to use (any tag by default)
    :param strip_sdp: whether to strip the SDP header from packets
    :param use_prefix: whether a key prefix is applied
    :param key_prefix: the key prefix to apply, if any
    :param prefix_type: an EIEIOPrefix, or None
    :param message_type: the EIEIOType of messages to send
    :param right_shift: right shift applied to keys
    :param payload_as_time_stamps: whether payloads carry timestamps
    :param use_payload_prefix: whether a payload prefix is applied
    :param payload_prefix: the payload prefix, if any
    :param payload_right_shift: right shift applied to payloads
    :param number_of_packets_sent_per_time_step: expected packet rate
    :param constraints: initial constraints on this vertex
    :param label: vertex label (defaults to "Live Packet Gatherer")
    :raise ConfigurationException: if the timestamp / prefix settings\
        are contradictory, or prefix_type is not an EIEIOPrefix
    """
    # A timestamp may be carried either as a shared payload prefix or as
    # the per-key payload — never both, and (for key-only message types
    # with timestamps requested) at least one.
    if (message_type in (EIEIOType.KEY_PAYLOAD_32_BIT,
                         EIEIOType.KEY_PAYLOAD_16_BIT)
            and use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as "
            "payload to each key, not both")
    if (message_type in (EIEIOType.KEY_32_BIT, EIEIOType.KEY_16_BIT)
            and not use_payload_prefix and payload_as_time_stamps):
        raise ConfigurationException(
            "Timestamp can either be included as payload prefix or as"
            " payload to each key, but current configuration does not "
            "specify either of these")
    if (prefix_type is not None
            and not isinstance(prefix_type, EIEIOPrefix)):
        raise ConfigurationException(
            "the type of a prefix type should be of a EIEIOPrefix, "
            "which can be located in :"
            "SpinnMan.messages.eieio.eieio_prefix_type")

    if label is None:
        label = "Live Packet Gatherer"
    ApplicationVertex.__init__(self, label, constraints, 1)

    # Try to place this near the Ethernet
    self.add_constraint(RadialPlacementFromChipConstraint(0, 0))

    # storage objects
    self._iptags = None

    # tag info
    self._ip_address = hostname
    self._port = port
    self._board_address = board_address
    self._tag = tag
    self._strip_sdp = strip_sdp

    # eieio info
    self._prefix_type = prefix_type
    self._use_prefix = use_prefix
    self._key_prefix = key_prefix
    self._message_type = message_type
    self._right_shift = right_shift
    self._payload_as_time_stamps = payload_as_time_stamps
    self._use_payload_prefix = use_payload_prefix
    self._payload_prefix = payload_prefix
    self._payload_right_shift = payload_right_shift
    self._number_of_packets_sent_per_time_step = \
        number_of_packets_sent_per_time_step
def __init__(self, arms=default_parameters['arms'],
             reward_delay=default_parameters['reward_delay'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration']):
    """ Create a multi-armed-bandit application vertex.

    :param arms: definitions of the bandit arms; one neuron per arm
    :param reward_delay: delay before a reward is delivered \
        (semantics defined by the C implementation — not visible here)
    :param constraints: initial constraints on this vertex
    :param label: vertex label
    :param incoming_spike_buffer_size: size of the incoming spike\
        buffer; when None, read from the "Simulation" config section
    :param simulation_duration_ms: total run time, used to size the\
        recording region
    """
    self._arms = arms
    self._no_arms = len(arms)
    # one neuron per arm
    self._n_neurons = self._no_arms
    self._reward_delay = reward_delay

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config

    # BUG FIX: previously the attribute was assigned ONLY when the
    # argument was None, so a caller-supplied buffer size was dropped
    # and the attribute left unset.  Store the argument first, then
    # fall back to the configured default.
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons, delay_per_stage, source_vertex,
             machine_time_step, timescale_factor, constraints=None,
             label="DelayExtension"):
    """ Create a new DelayExtension vertex.

    :param n_neurons: number of atoms in this vertex
    :param delay_per_stage: number of timesteps of delay per stage
    :param source_vertex: the vertex whose spikes are being delayed
    :param machine_time_step: machine timestep \
        (NOTE(review): accepted but not stored here — confirm needed)
    :param timescale_factor: timescale factor \
        (NOTE(review): accepted but not stored here — confirm needed)
    :param constraints: initial constraints on this vertex
    :param label: vertex label
    """
    ApplicationVertex.__init__(self, label, constraints, 256)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractProvidesNKeysForPartition.__init__(self)

    self._source_vertex = source_vertex
    self._delay_per_stage = delay_per_stage
    self._n_delay_stages = 0

    # atom store
    self._n_atoms = n_neurons

    # vertex_slice -> delay block, filled in during data specification
    self._delay_blocks = dict()

    # must be partitioned the same way as the vertex it delays
    self.add_constraint(
        PartitionerSameSizeAsVertexConstraint(source_vertex))
def __init__(self, n_neurons,
             constraints=none_pynn_default_parameters['constraints'],
             label=none_pynn_default_parameters['label'],
             rate=default_parameters['rate'],
             start=default_parameters['start'],
             duration=default_parameters['duration'],
             seed=none_pynn_default_parameters['seed']):
    """ Create a Poisson spike source vertex.

    :param n_neurons: number of neurons (atoms) in the source
    :param constraints: initial constraints on this vertex
    :param label: vertex label
    :param rate: spike rate(s), expanded to one value per neuron
    :param start: start time(s), expanded to one value per neuron
    :param duration: duration(s), expanded to one value per neuron
    :param seed: seed for the random number generator
    """
    ApplicationVertex.__init__(
        self, label, constraints, self._model_based_max_atoms_per_core)
    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    SimplePopulationSettable.__init__(self)
    ProvidesKeyToAtomMappingImpl.__init__(self)

    sim_config = globals_variables.get_simulator().config

    # atoms params
    self._n_atoms = n_neurons
    # NOTE(review): the seed argument feeds the RNG below, but _seed
    # itself is left as None here — confirm this is intentional.
    self._seed = None

    # check for changes parameters
    self._change_requires_mapping = True
    self._change_requires_neuron_parameters_reload = False

    # Store the parameters, expanded to one value per neuron
    self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
    self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
    self._duration = utility_calls.convert_param_to_numpy(
        duration, n_neurons)
    self._time_to_spike = utility_calls.convert_param_to_numpy(
        0, n_neurons)
    self._rng = numpy.random.RandomState(seed)
    self._machine_time_step = None

    # Prepare for recording, and to get spikes
    self._spike_recorder = MultiSpikeRecorder()
    self._time_between_requests = sim_config.getint(
        "Buffers", "time_between_requests")
    self._receive_buffer_host = sim_config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = helpful_functions.read_config_int(
        sim_config, "Buffers", "receive_buffer_port")
    self._minimum_buffer_sdram = sim_config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = sim_config.getboolean(
        "Buffers", "use_auto_pause_and_resume")

    # Work out the buffered-recording sizes, if enabled
    spike_buffer_max_size = 0
    self._buffer_size_before_receive = None
    if sim_config.getboolean("Buffers", "enable_buffered_recording"):
        spike_buffer_max_size = sim_config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = sim_config.getint(
            "Buffers", "buffer_size_before_receive")
    self._maximum_sdram_for_buffering = [spike_buffer_max_size]
def __init__(self, n_atoms, reload_region_data):
    """ Create a vertex whose regions are reloaded on demand.

    :param n_atoms: The number of atoms in the vertex
    :param reload_region_data: list of tuples of (region_id, data to write)
    """
    ApplicationVertex.__init__(self)
    self._n_atoms = n_atoms
    self._reload_region_data = reload_region_data
    # no regeneration has happened yet, and a reload is pending
    self._regenerate_call_count = 0
    self._requires_regions_to_be_reloaded = True
def __init__(self, label, constraints):
    """ Create a command-sender vertex with empty command queues and\
        key/partition bookkeeping.

    :param label: vertex label
    :param constraints: constraints for the vertex
    """
    ApplicationVertex.__init__(self, label, constraints, 1)

    # command queues, filled in later
    self._timed_commands = []
    self._commands_at_start_resume = []
    self._commands_at_pause_stop = []

    # key / partition bookkeeping
    self._partition_id_to_keys = {}
    self._keys_to_partition_id = {}
    self._vertex_to_key_map = {}
    self._edge_partition_id_counter = 0
def __init__(self, label, constraints, n_samples_per_recording,
             sampling_frequency):
    """ Chip power monitor application vertex constructor.

    :param label: vertex label
    :param constraints: constraints for the vertex
    :param n_samples_per_recording: how many samples to take before\
        recording the total to SDRAM
    :param sampling_frequency: how many microseconds between samples
    """
    ApplicationVertex.__init__(self, label, constraints, 1)
    self._sampling_frequency = sampling_frequency
    self._n_samples_per_recording = n_samples_per_recording
def __init__(self, n_neurons, width=WIDTH_PIXELS, height=HEIGHT_PIXELS,
             colour_bits=COLOUR_BITS, constraints=None,
             label="Bandit", incoming_spike_buffer_size=None,
             simulation_duration_ms=MAX_SIM_DURATION):
    """ Create a pixel-grid application vertex.

    :param n_neurons: ignored — the neuron count is derived from\
        width, height and colour_bits below
    :param width: frame width in pixels
    :param height: frame height in pixels
    :param colour_bits: bits used to encode colour
    :param constraints: initial constraints on this vertex
    :param label: vertex label
    :param incoming_spike_buffer_size: size of the incoming spike\
        buffer; when None, read from the "Simulation" config section
    :param simulation_duration_ms: total run time, used to size the\
        recording region
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._width = width
    self._height = height
    self._colour_bits = colour_bits
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(width)))
    self._height_bits = numpy.uint32(numpy.ceil(numpy.log2(height)))

    # one key bit per x/y/colour bit, plus one extra bit
    self._n_neurons = (
        1 << (self._width_bits + self._height_bits
              + self._colour_bits + 1))

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config

    # BUG FIX: previously the attribute was assigned ONLY when the
    # argument was None, so a caller-supplied buffer size was dropped
    # and the attribute left unset.  Store the argument first, then
    # fall back to the configured default.
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons,
             spikes_per_second=AbstractPopulationVertex.
             none_pynn_default_parameters['spikes_per_second'],
             ring_buffer_sigma=AbstractPopulationVertex.
             none_pynn_default_parameters['ring_buffer_sigma'],
             incoming_spike_buffer_size=None, constraints=None,
             label="Convolution core", src_width=WIDTH_PIXELS,
             src_height=HEIGHT_PIXELS,
             src_polarity_bits=POLARITY_BITS, polarity=POLARITY,
             sample_step_width=SAMPLE_STEP_WIDTH,
             sample_step_height=SAMPLE_STEP_HEIGHT, kernel=KERNEL,
             threshold=THRESHOLD,
             use_xyp_or_pyx=KEY_FORMATS.USE_XYP.value,
             time_window=TIME_WINDOW):
    """ Create a convolution-core application vertex.

    :param n_neurons: ignored — the neuron count is derived from the\
        subsampled output dimensions and polarity bits below
    :param src_width: source frame width in pixels
    :param src_height: source frame height in pixels
    :param src_polarity_bits: bits used to encode event polarity
    :param sample_step_width: horizontal subsampling step\
        (rounded to a power of two below)
    :param sample_step_height: vertical subsampling step\
        (rounded to a power of two below)
    :param kernel: 2D convolution kernel (stored as float16)
    :param threshold: output threshold (stored as float16)
    :param incoming_spike_buffer_size: size of the incoming spike\
        buffer; when None, read from the "Simulation" config section
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    # The "#N" comments number the parameters as laid out in the
    # on-chip parameter region (see _memory_size_in_bytes below).
    self._width = numpy.uint32(src_width)  # 1
    self._height = numpy.uint32(src_height)  # 2
    self._polarity_bits = numpy.uint32(src_polarity_bits)  # 3
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(src_width)))  # 4
    self._height_bits = numpy.uint32(numpy.ceil(
        numpy.log2(src_height)))  # 5

    self._kernel_width = numpy.uint32(kernel.shape[1])  # 6
    self._kernel_height = numpy.uint32(kernel.shape[0])  # 7
    self._step_width = numpy.uint32(
        numpy.round(numpy.log2(sample_step_width)))  # 8
    self._step_height = numpy.uint32(
        numpy.round(numpy.log2(sample_step_height)))  # 9
    self._threshold = numpy.float16(threshold)  # 10
    self._use_xyp_or_pyx = numpy.uint32(use_xyp_or_pyx)  # 11
    self._time_window = numpy.uint32(time_window)  # 12

    # convolution starts half a kernel in from the frame edge
    self._start_width = kernel.shape[1] // 2
    self._start_height = kernel.shape[0] // 2
    self._out_width = subsamp_size(self._start_width, self._width,
                                   1 << self._step_width)  # 13
    self._out_height = subsamp_size(self._start_height, self._height,
                                    1 << self._step_height)  # 14

    self._out_width_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_width)))  # 15
    self._out_height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_height)))  # 16

    self._polarity = polarity  # 17

    self._n_neurons = (1 << (self._out_width_bits
                             + self._out_height_bits
                             + self._polarity_bits))

    self._kernel = numpy.float16(kernel)

    # 17 params, kernel, key
    self._memory_size_in_bytes = (17 + kernel.size + 1) * 4

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config

    # BUG FIX: previously the attribute was assigned ONLY when the
    # argument was None, so a caller-supplied buffer size was dropped
    # and the attribute left unset.  Store the argument first, then
    # fall back to the configured default.
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(
        self, n_neurons, label, constraints, max_atoms_per_core,
        spikes_per_second, ring_buffer_sigma,
        incoming_spike_buffer_size, neuron_impl, pynn_model):
    """ Create a population vertex backed by a neuron implementation.

    :param int n_neurons: The number of neurons in the population
    :param str label: The label on the population
    :param list(~pacman.model.constraints.AbstractConstraint) constraints:
        Constraints on where a population's vertices may be placed.
    :param int max_atoms_per_core:
        The maximum number of atoms (neurons) per SpiNNaker core.
    :param spikes_per_second: Expected spike rate
    :type spikes_per_second: float or None
    :param ring_buffer_sigma:
        How many SD above the mean to go for upper bound of ring buffer\
        size; a good starting choice is 5.0. Given length of simulation\
        we can set this for approximate number of saturation events.
    :type ring_buffer_sigma: float or None
    :param incoming_spike_buffer_size: size of the incoming spike\
        buffer; when None, read from the "Simulation" config section
    :type incoming_spike_buffer_size: int or None
    :param AbstractNeuronImpl neuron_impl:
        The (Python side of the) implementation of the neurons themselves.
    :param AbstractPyNNNeuronModel pynn_model:
        The PyNN neuron model that this vertex is working on behalf of.
    """
    # pylint: disable=too-many-arguments, too-many-locals
    ApplicationVertex.__init__(
        self, label, constraints, max_atoms_per_core)

    # atom / sub-vertex bookkeeping
    self.__n_atoms = n_neurons
    self.__n_subvertices = 0
    self.__n_data_specs = 0

    sim_config = globals_variables.get_simulator().config

    # buffer data: fall back to the configured default when not given
    self.__incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self.__incoming_spike_buffer_size = sim_config.getint(
            "Simulation", "incoming_spike_buffer_size")

    self.__neuron_impl = neuron_impl
    self.__pynn_model = pynn_model

    # per-neuron parameter and state-variable storage, populated by
    # the neuron implementation
    self._parameters = SpynnakerRangeDictionary(n_neurons)
    self._state_variables = SpynnakerRangeDictionary(n_neurons)
    self.__neuron_impl.add_parameters(self._parameters)
    self.__neuron_impl.add_state_variables(self._state_variables)
    self.__initial_state_variables = None
    self.__updated_state_variables = set()

    # Set up for recording
    recordables = list(self.__neuron_impl.get_recordable_variables())
    data_types = dict(self.__neuron_impl.get_recordable_data_types())
    self.__neuron_recorder = NeuronRecorder(
        recordables, data_types, [NeuronRecorder.SPIKES], n_neurons)

    # Set up synapse handling
    self.__synapse_manager = SynapticManager(
        self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
        spikes_per_second, sim_config)

    # change-tracking flags
    self.__change_requires_mapping = True
    self.__change_requires_neuron_parameters_reload = False
    self.__change_requires_data_generation = False
    self.__has_reset_last = True

    # Set up for profiling
    self.__n_profile_samples = helpful_functions.read_config_int(
        sim_config, "Reports", "n_profile_samples")
def __init__(self, n_atoms, label=None, max_atoms_per_core=256):
    """ Create a simple application vertex.

    :param n_atoms: the number of atoms in the vertex
    :param label: vertex label
    :param max_atoms_per_core: maximum atoms placed on one core
    """
    ApplicationVertex.__init__(
        self, label=label, max_atoms_per_core=max_atoms_per_core)
    self._n_atoms = n_atoms
    self._model_based_max_atoms_per_core = max_atoms_per_core
def __init__(
        self, n_keys, label=None, constraints=None,
        # NOTE(review): sys.maxint is Python-2 only; sys.maxsize would
        # be needed for Python 3
        max_atoms_per_core=sys.maxint,
        # General parameters
        board_address=None,
        # Live input parameters
        receive_port=None,
        receive_sdp_port=(
            constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
        receive_tag=None,
        receive_rate=10,
        # Key parameters
        virtual_key=None, prefix=None,
        prefix_type=None, check_keys=False,
        # Send buffer parameters
        send_buffer_times=None,
        send_buffer_partition_id=None,
        send_buffer_max_space=(
            constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
        send_buffer_space_before_notify=640,
        # Buffer parameters
        buffer_notification_ip_address=None,
        buffer_notification_port=None,
        buffer_notification_tag=None,
        # Extra flag for input without a reserved port
        reserve_reverse_ip_tag=False):
    """ Create a multicast source that injects keys received over EIEIO.

    :param n_keys: The number of keys to be sent via this multicast source
    :param label: The label of this vertex
    :param constraints: Any initial constraints to this vertex
    :param board_address: The IP address of the board on which to place\
        this vertex if receiving data, either buffered or live (by\
        default, any board is chosen)
    :param receive_port: The port on the board that will listen for\
        incoming event packets (default is to disable this feature;\
        set a value to enable it)
    :param receive_sdp_port: The SDP port to listen on for incoming event\
        packets (defaults to 1)
    :param receive_tag: The IP tag to use for receiving live events\
        (uses any by default)
    :param receive_rate: The estimated rate of packets that will be sent\
        by this source
    :param virtual_key: The base multicast key to send received events\
        with (assigned automatically by default)
    :param prefix: The prefix to "or" with generated multicast keys\
        (default is no prefix)
    :param prefix_type: Whether the prefix should apply to the upper or\
        lower half of the multicast keys (default is upper half)
    :param check_keys: True if the keys of received events should be\
        verified before sending (default False)
    :param send_buffer_times: An array of arrays of times at which keys\
        should be sent (one array for each key, default disabled)
    :param send_buffer_partition_id: The id of the partition containing\
        the edges down which the events are to be sent
    :param send_buffer_max_space: The maximum amount of space to use of\
        the SDRAM on the machine (default is 1MB)
    :param send_buffer_space_before_notify: The amount of space free in\
        the sending buffer before the machine will ask the host for\
        more data (default setting is optimised for most cases)
    :param buffer_notification_ip_address: The IP address of the host\
        that will send new buffers (must be specified if a send buffer\
        is specified or if recording will be used)
    :param buffer_notification_port: The port that the host that will\
        send new buffers is listening on (must be specified if a\
        send buffer is specified, or if recording will be used)
    :param buffer_notification_tag: The IP tag to use to notify the\
        host about space in the buffer (default is to use any tag)
    :param reserve_reverse_ip_tag: True to reserve a reverse IP tag\
        even when no receive_port is given
    """
    ApplicationVertex.__init__(
        self, label, constraints, max_atoms_per_core)
    ProvidesKeyToAtomMappingImpl.__init__(self)

    # basic items
    self._n_atoms = n_keys

    # Store the parameters for EIEIO
    self._board_address = board_address
    self._receive_port = receive_port
    self._receive_sdp_port = receive_sdp_port
    self._receive_tag = receive_tag
    self._receive_rate = receive_rate
    self._virtual_key = virtual_key
    self._prefix = prefix
    self._prefix_type = prefix_type
    self._check_keys = check_keys

    # Reserve a reverse IP tag when live input is enabled or requested.
    # NOTE(review): if both this branch and the send-buffer branch below
    # fire with a board_address set, the BoardConstraint is added twice
    # — confirm whether duplicate constraints are harmless here.
    self._reverse_iptags = None
    if receive_port is not None or reserve_reverse_ip_tag:
        self._reverse_iptags = [
            ReverseIPtagResource(
                port=receive_port, sdp_port=receive_sdp_port,
                tag=receive_tag)]
        if board_address is not None:
            self.add_constraint(BoardConstraint(board_address))

    # Store the send buffering details
    self._send_buffer_times = send_buffer_times
    self._send_buffer_partition_id = send_buffer_partition_id
    self._send_buffer_max_space = send_buffer_max_space
    self._send_buffer_space_before_notify = send_buffer_space_before_notify

    # Store the buffering details
    self._buffer_notification_ip_address = buffer_notification_ip_address
    self._buffer_notification_port = buffer_notification_port
    self._buffer_notification_tag = buffer_notification_tag
    self._reserve_reverse_ip_tag = reserve_reverse_ip_tag

    # Reserve an IP tag for buffer notifications when send buffering
    # is in use
    self._iptags = None
    if send_buffer_times is not None:
        self._iptags = [
            IPtagResource(
                buffer_notification_ip_address,
                buffer_notification_port, True,
                buffer_notification_tag)]
        if board_address is not None:
            self.add_constraint(BoardConstraint(board_address))

    # Store recording parameters
    self._record_buffer_size = 0
    self._record_buffer_size_before_receive = 0
    self._record_time_between_requests = 0

    # Keep the vertices for resuming runs
    self._machine_vertices = list()