def __init__(self, n_neurons, machine_time_step, timescale_factor,
             spinnaker_link_id, speed=30, sample_time=4096, update_time=512,
             delay_time=5, delta_threshold=23, continue_if_not_different=True,
             label="RobotMotorControl"):
    """ Creates a new motor control vertex for the munich robot.
    """
    if n_neurons != 6:
        logger.warn("The specified number of neurons for the munich motor"
                    " device has been ignored; 6 will be used instead")

    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    AbstractPartitionableVertex.__init__(self, 6, label, 6, None)
    AbstractVertexWithEdgeToDependentVertices.__init__(
        self, [_MunichMotorDevice(spinnaker_link_id)], None)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)

    self._speed = speed
    self._sample_time = sample_time
    self._update_time = update_time
    self._delay_time = delay_time
    self._delta_threshold = delta_threshold
    self._continue_if_not_different = continue_if_not_different
def __init__(self, arms=default_parameters['arms'],
             reward_delay=default_parameters['reward_delay'],
             reward_based=default_parameters['reward_based'],
             rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             stochastic=default_parameters['stochastic'],
             constant_input=default_parameters['constant_input'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    self._label = label

    # Pass in variables
    self._arms = arms
    self._no_arms = len(arms)
    self._n_neurons = self._no_arms
    self._rand_seed = rand_seed

    self._reward_delay = reward_delay
    self._reward_based = reward_based

    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._constant_input = constant_input

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    resources_required = (
        self.BANDIT_REGION_BYTES + self.BASE_ARMS_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Bandit, self).__init__(
        BanditMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            arms, reward_delay, reward_based, rate_on, rate_off,
            stochastic, constant_input, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True
    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
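
# The recording-size arithmetic above reserves one 4-byte entry per simulated
# second. A quick standalone check of that reading (the 20-second duration is
# an illustrative value, not a default taken from the source):
simulation_duration_ms = 20 * 1000.
recording_size = int((simulation_duration_ms / 1000.) * 4)
assert recording_size == 80  # 20 s of data -> 80 bytes (one word per second)
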
def __init__(self, arms=default_parameters['arms'],
             reward_delay=default_parameters['reward_delay'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration']):
    self._arms = arms
    self._no_arms = len(arms)
    self._n_neurons = self._no_arms

    self._reward_delay = reward_delay

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config

    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons, delay_per_stage, source_vertex,
             machine_time_step, timescale_factor, constraints=None,
             label="DelayExtension"):
    """ Creates a new DelayExtension Object.
    """
    ApplicationVertex.__init__(self, label, constraints, 256)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractProvidesNKeysForPartition.__init__(self)

    self._source_vertex = source_vertex
    self._n_delay_stages = 0
    self._delay_per_stage = delay_per_stage

    # atom store
    self._n_atoms = n_neurons

    # Dictionary of vertex_slice -> delay block for data specification
    self._delay_blocks = dict()

    self.add_constraint(
        PartitionerSameSizeAsVertexConstraint(source_vertex))
def __init__(
        self, n_neurons, machine_time_step, timescale_factor, label, port,
        virtual_key=None):
    ReverseIpTagMultiCastSource.__init__(
        self, n_keys=n_neurons, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor, label=label, receive_port=port,
        virtual_key=virtual_key)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
def __init__(self, n_neurons,
             constraints=none_pynn_default_parameters['constraints'],
             label=none_pynn_default_parameters['label'],
             rate=default_parameters['rate'],
             start=default_parameters['start'],
             duration=default_parameters['duration'],
             seed=none_pynn_default_parameters['seed']):
    ApplicationVertex.__init__(
        self, label, constraints, self._model_based_max_atoms_per_core)
    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    SimplePopulationSettable.__init__(self)
    ProvidesKeyToAtomMappingImpl.__init__(self)

    config = globals_variables.get_simulator().config

    # atoms params
    self._n_atoms = n_neurons
    self._seed = None

    # flags used to track changes to parameters
    self._change_requires_mapping = True
    self._change_requires_neuron_parameters_reload = False

    # Store the parameters
    self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
    self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
    self._duration = utility_calls.convert_param_to_numpy(
        duration, n_neurons)
    self._time_to_spike = utility_calls.convert_param_to_numpy(
        0, n_neurons)
    self._rng = numpy.random.RandomState(seed)
    self._machine_time_step = None

    # Prepare for recording, and to get spikes
    self._spike_recorder = MultiSpikeRecorder()
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = helpful_functions.read_config_int(
        config, "Buffers", "receive_buffer_port")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")

    spike_buffer_max_size = 0
    self._buffer_size_before_receive = None
    if config.getboolean("Buffers", "enable_buffered_recording"):
        spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")

    self._maximum_sdram_for_buffering = [spike_buffer_max_size]
def __init__(self, n_neurons, constraints=None, label="Breakout"):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless

    # Superclasses
    ApplicationVertex.__init__(
        self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    PopulationSettableChangeRequiresMapping.__init__(self)
def __init__(self, spinnaker_link_id, board_address=None, constraints=None,
             label=None):
    ApplicationSpiNNakerLinkVertex.__init__(
        self, n_atoms=NUM_NEUR_IN, spinnaker_link_id=spinnaker_link_id,
        board_address=board_address, label=label)
    AbstractProvidesNKeysForPartition.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractSendMeMulticastCommandsVertex.__init__(self)
def __init__(self, machine_time_step, timescale_factor):
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractPartitionableVertex.__init__(
        self, 1, "Command Sender", 1)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)

    self._edge_constraints = dict()
    self._command_edge = dict()
    self._times_with_commands = set()
    self._commands_with_payloads = dict()
    self._commands_without_payloads = dict()
def __init__(self, retina_key, spinnaker_link_id, position,
             machine_time_step, timescale_factor, label=None,
             n_neurons=None, polarity=None):
    if polarity is None:
        polarity = MunichRetinaDevice.MERGED_POLARITY

    self._fixed_key = (retina_key & 0xFFFF) << 16
    self._fixed_mask = 0xFFFF8000
    if polarity == MunichRetinaDevice.UP_POLARITY:
        self._fixed_key |= 0x4000

    if polarity == MunichRetinaDevice.MERGED_POLARITY:

        # There are 128 x 128 retina "pixels" x 2 polarities
        fixed_n_neurons = 128 * 128 * 2
    else:

        # There are 128 x 128 retina "pixels"
        fixed_n_neurons = 128 * 128
        self._fixed_mask = 0xFFFFC000

    AbstractVirtualVertex.__init__(
        self, fixed_n_neurons, spinnaker_link_id,
        max_atoms_per_core=fixed_n_neurons, label=label)
    AbstractSendMeMulticastCommandsVertex.__init__(
        self, self._get_commands(position))
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)

    self._polarity = polarity
    self._position = position

    if (self._position != self.RIGHT_RETINA and
            self._position != self.LEFT_RETINA):
        raise exceptions.SpynnakerException(
            "The external Retina does not recognise this _position")

    if n_neurons != fixed_n_neurons and n_neurons is not None:
        print "Warning, the retina will have {} neurons".format(
            fixed_n_neurons)
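
# As a worked example of the key/mask arithmetic above (a standalone sketch;
# the base key value is illustrative, not a value from the source), the
# MERGED_POLARITY mask frees exactly enough low-order key bits to address
# every pixel/polarity pair:
RETINA_KEY = 0x0042                       # illustrative 16-bit base key
fixed_key = (RETINA_KEY & 0xFFFF) << 16   # key occupies the upper half-word
fixed_mask = 0xFFFF8000                   # MERGED_POLARITY: 15 free low bits

free_bits = 32 - bin(fixed_mask).count("1")
assert 1 << free_bits == 128 * 128 * 2    # one key per pixel/polarity pair
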
def __init__(self, n_neurons, width=WIDTH_PIXELS, height=HEIGHT_PIXELS,
             colour_bits=COLOUR_BITS, constraints=None, label="Bandit",
             incoming_spike_buffer_size=None,
             simulation_duration_ms=MAX_SIM_DURATION):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._width = width
    self._height = height
    self._colour_bits = colour_bits
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(width)))
    self._height_bits = numpy.uint32(numpy.ceil(numpy.log2(height)))

    self._n_neurons = (
        1 << (self._width_bits + self._height_bits +
              self._colour_bits + 1))

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config

    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
        duration=None, seed=None):
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, self._model_based_max_atoms_per_core,
        constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    PopulationSettableChangeRequiresMapping.__init__(self)

    # Store the parameters
    self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
    self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
    self._duration = utility_calls.convert_param_to_numpy(
        duration, n_neurons)
    self._rng = numpy.random.RandomState(seed)

    # Prepare for recording, and to get spikes
    self._spike_recorder = MultiSpikeRecorder(machine_time_step)
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._enable_buffered_recording = config.getboolean(
        "Buffers", "enable_buffered_recording")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = config.getint(
        "Buffers", "receive_buffer_port")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
        duration=None, seed=None):
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, self._model_based_max_atoms_per_core,
        constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    PopulationSettableChangeRequiresMapping.__init__(self)

    # Store the parameters
    self._rate = rate
    self._start = start
    self._duration = duration
    self._rng = numpy.random.RandomState(seed)

    # Prepare for recording, and to get spikes
    self._spike_recorder = SpikeRecorder(machine_time_step)
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._enable_buffered_recording = config.getboolean(
        "Buffers", "enable_buffered_recording")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = config.getint(
        "Buffers", "receive_buffer_port")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")
def __init__(self, n_neurons, delay_per_stage, source_vertex,
             machine_time_step, timescale_factor, constraints=None,
             label="DelayExtension"):
    """ Creates a new DelayExtension Object.
    """
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, 256, constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractProvidesNKeysForPartition.__init__(self)

    self._source_vertex = source_vertex
    self._n_delay_stages = 0
    self._delay_per_stage = delay_per_stage

    # Dictionary of vertex_slice -> delay block for data specification
    self._delay_blocks = dict()

    self.add_constraint(
        PartitionerSameSizeAsVertexConstraint(source_vertex))
def __init__(self, rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             pop_size=default_parameters['pop_size'],
             prob_command=default_parameters['prob_command'],
             prob_in_change=default_parameters['prob_in_change'],
             time_period=default_parameters['time_period'],
             stochastic=default_parameters['stochastic'],
             reward=default_parameters['reward'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    self._label = label

    # Pass in variables
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._reward = reward
    self._pop_size = pop_size
    self._prob_command = prob_command
    self._prob_in_change = prob_in_change

    self._n_neurons = pop_size * 4
    self._rand_seed = rand_seed

    self._time_period = time_period

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.RECALL_REGION_BYTES + self.DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Recall, self).__init__(
        RecallMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            rate_on, rate_off, pop_size, prob_command, prob_in_change,
            time_period, stochastic, reward, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True
    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, truth_table, input_sequence,
             rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             score_delay=default_parameters['score_delay'],
             stochastic=default_parameters['stochastic'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    self._label = label

    # Pass in variables
    self._truth_table = truth_table
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._input_sequence = input_sequence
    self._no_inputs = len(input_sequence)

    # an n-input device needs a truth table with exactly 2^n entries
    if self._no_inputs != numpy.log2(len(self._truth_table)):
        raise Bad_Table('table and input sequence are not compatible')

    self._n_neurons = self._no_inputs
    self._rand_seed = rand_seed
    self._score_delay = score_delay

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # (static) resources required
    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.LOGIC_REGION_BYTES + self.BASE_DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Logic, self).__init__(
        LogicMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            truth_table, input_sequence, rate_on, rate_off, score_delay,
            stochastic, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True
    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
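
# The compatibility check above relies on an n-input device needing a truth
# table with exactly 2 ** n entries. A concrete example of a table/sequence
# pair that passes the check (values are illustrative, not defaults):
import numpy

input_sequence = [0, 1]        # two inputs
truth_table = [0, 1, 1, 0]     # XOR: 2 ** 2 = 4 entries
assert len(input_sequence) == numpy.log2(len(truth_table))
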
def __init__(self, mode, retina_key, spinnaker_link_id, polarity,
             machine_time_step, timescale_factor, label=None,
             n_neurons=None):
    """
    :param mode: The retina "mode"
    :param retina_key: The value of the top 16-bits of the key
    :param spinnaker_link_id: The spinnaker link to which the retina is\
            connected
    :param polarity: The "polarity" of the retina data
    :param machine_time_step: The time step of the simulation
    :param timescale_factor: The timescale factor of the simulation
    :param label: The label for the population
    :param n_neurons: The number of neurons in the population
    """
    self._polarity = polarity
    self._fixed_key = (retina_key & 0xFFFF) << 16
    self._fixed_mask = 0xFFFF8000
    if polarity == ExternalFPGARetinaDevice.UP_POLARITY:
        self._fixed_key |= 0x4000

    fixed_n_neurons = n_neurons

    if mode == ExternalFPGARetinaDevice.MODE_128:
        if (polarity == ExternalFPGARetinaDevice.UP_POLARITY or
                polarity == ExternalFPGARetinaDevice.DOWN_POLARITY):
            fixed_n_neurons = 128 * 128
            self._fixed_mask = 0xFFFFC000
        else:
            fixed_n_neurons = 128 * 128 * 2
    elif mode == ExternalFPGARetinaDevice.MODE_64:
        if (polarity == ExternalFPGARetinaDevice.UP_POLARITY or
                polarity == ExternalFPGARetinaDevice.DOWN_POLARITY):
            fixed_n_neurons = 64 * 64
            self._fixed_mask = 0xFFFFF000
        else:
            fixed_n_neurons = 64 * 64 * 2
    elif mode == ExternalFPGARetinaDevice.MODE_32:
        if (polarity == ExternalFPGARetinaDevice.UP_POLARITY or
                polarity == ExternalFPGARetinaDevice.DOWN_POLARITY):
            fixed_n_neurons = 32 * 32
            self._fixed_mask = 0xFFFFFC00
        else:
            fixed_n_neurons = 32 * 32 * 2
    elif mode == ExternalFPGARetinaDevice.MODE_16:
        if (polarity == ExternalFPGARetinaDevice.UP_POLARITY or
                polarity == ExternalFPGARetinaDevice.DOWN_POLARITY):
            fixed_n_neurons = 16 * 16
            self._fixed_mask = 0xFFFFFF00
        else:
            fixed_n_neurons = 16 * 16 * 2
    else:
        raise exceptions.SpynnakerException(
            "the FPGA retina does not recognise this mode")

    if fixed_n_neurons != n_neurons and n_neurons is not None:
        logger.warn(
            "The specified number of neurons for the FPGA retina"
            " device has been ignored; {} will be used instead".format(
                fixed_n_neurons))

    AbstractVirtualVertex.__init__(
        self, fixed_n_neurons, spinnaker_link_id,
        max_atoms_per_core=fixed_n_neurons, label=label)
    AbstractSendMeMulticastCommandsVertex.__init__(
        self, commands=[
            MultiCastCommand(0, 0x0000FFFF, 0xFFFF0000, 1, 5, 100),
            MultiCastCommand(-1, 0x0000FFFE, 0xFFFF0000, 0, 5, 100)])
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
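
# Each single-polarity mode above pairs a pixel count with a mask whose free
# (unmasked) bits address exactly that many atoms. A standalone self-check of
# the four mode/mask pairs used in the constructor:
modes = {128: 0xFFFFC000, 64: 0xFFFFF000, 32: 0xFFFFFC00, 16: 0xFFFFFF00}
for pixels, mask in modes.items():
    free_bits = 32 - bin(mask).count("1")
    assert 1 << free_bits == pixels * pixels  # one key per retina pixel
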
def __init__(self, constraints=default_parameters['constraints'],
             encoding=default_parameters['encoding'],
             time_increment=default_parameters['time_increment'],
             pole_length=default_parameters['pole_length'],
             pole_angle=default_parameters['pole_angle'],
             pole2_length=default_parameters['pole2_length'],
             pole2_angle=default_parameters['pole2_angle'],
             reward_based=default_parameters['reward_based'],
             force_increments=default_parameters['force_increments'],
             max_firing_rate=default_parameters['max_firing_rate'],
             number_of_bins=default_parameters['number_of_bins'],
             central=default_parameters['central'],
             rand_seed=default_parameters['rand_seed'],
             bin_overlap=default_parameters['bin_overlap'],
             tau_force=default_parameters['tau_force'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration']):
    self._label = label
    self._encoding = encoding

    # Pass in variables
    self._pole_length = pole_length
    self._pole_angle = pole_angle
    self._pole2_length = pole2_length
    self._pole2_angle = pole2_angle

    self._force_increments = force_increments
    # for rate based it's only 1 neuron per metric
    # (position, angle, velocity of both)
    self._n_neurons = 6 * number_of_bins

    self._time_increment = time_increment
    self._reward_based = reward_based
    self._max_firing_rate = max_firing_rate
    self._number_of_bins = number_of_bins
    self._central = central
    self._rand_seed = rand_seed
    self._bin_overlap = bin_overlap
    self._tau_force = tau_force

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.PENDULUM_REGION_BYTES + self.BASE_DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(DoublePendulum, self).__init__(
        DoublePendulumMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            encoding, time_increment, pole_length, pole_angle,
            pole2_length, pole2_angle, reward_based, force_increments,
            max_firing_rate, number_of_bins, central, bin_overlap,
            tau_force, incoming_spike_buffer_size, simulation_duration_ms,
            rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True
    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons,
             spikes_per_second=AbstractPopulationVertex.
             none_pynn_default_parameters['spikes_per_second'],
             ring_buffer_sigma=AbstractPopulationVertex.
             none_pynn_default_parameters['ring_buffer_sigma'],
             incoming_spike_buffer_size=None, constraints=None,
             label="Convolution core", src_width=WIDTH_PIXELS,
             src_height=HEIGHT_PIXELS, src_polarity_bits=POLARITY_BITS,
             polarity=POLARITY, sample_step_width=SAMPLE_STEP_WIDTH,
             sample_step_height=SAMPLE_STEP_HEIGHT, kernel=KERNEL,
             threshold=THRESHOLD, use_xyp_or_pyx=KEY_FORMATS.USE_XYP.value,
             time_window=TIME_WINDOW):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless

    # the numbered comments count the parameters written to the data
    # region; the total feeds _memory_size_in_bytes below
    self._width = numpy.uint32(src_width)  # 1
    self._height = numpy.uint32(src_height)  # 2
    self._polarity_bits = numpy.uint32(src_polarity_bits)  # 3
    self._width_bits = numpy.uint32(
        numpy.ceil(numpy.log2(src_width)))  # 4
    self._height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(src_height)))  # 5

    self._kernel_width = numpy.uint32(kernel.shape[1])  # 6
    self._kernel_height = numpy.uint32(kernel.shape[0])  # 7
    self._step_width = numpy.uint32(
        numpy.round(numpy.log2(sample_step_width)))  # 8
    self._step_height = numpy.uint32(
        numpy.round(numpy.log2(sample_step_height)))  # 9
    self._threshold = numpy.float16(threshold)  # 10
    self._use_xyp_or_pyx = numpy.uint32(use_xyp_or_pyx)  # 11
    self._time_window = numpy.uint32(time_window)  # 12

    self._start_width = kernel.shape[1] // 2
    self._start_height = kernel.shape[0] // 2
    self._out_width = subsamp_size(
        self._start_width, self._width, 1 << self._step_width)  # 13
    self._out_height = subsamp_size(
        self._start_height, self._height, 1 << self._step_height)  # 14

    self._out_width_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_width)))  # 15
    self._out_height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_height)))  # 16

    self._polarity = polarity  # 17

    self._n_neurons = (
        1 << (self._out_width_bits + self._out_height_bits +
              self._polarity_bits))

    self._kernel = numpy.float16(kernel)

    # 17 params + kernel + key
    self._memory_size_in_bytes = (17 + kernel.size + 1) * 4

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config

    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
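
# subsamp_size is not reproduced in this section. The sketch below is one
# plausible definition consistent with how it is called above (the output
# extent when sampling every `step` pixels starting at `start`); treat it as
# an assumption rather than the actual helper:
import numpy

def subsamp_size(start, size, step):
    # assumed: number of samples taken in [start, size) every `step` pixels,
    # i.e. ceil((size - start) / step)
    return int(numpy.ceil(float(size - start) / step))

# e.g. a 128-pixel axis with a 5x5 kernel (start = 5 // 2 = 2), step of 2
assert subsamp_size(2, 128, 2) == 63
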
def __init__(
        self, n_keys, resources_required, machine_time_step,
        timescale_factor, label, constraints=None,

        # General input and output parameters
        board_address=None,

        # Live input parameters
        receive_port=None,
        receive_sdp_port=(
            constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
        receive_tag=None,

        # Key parameters
        virtual_key=None, prefix=None,
        prefix_type=None, check_keys=False,

        # Send buffer parameters
        send_buffer_times=None,
        send_buffer_max_space=(
            constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
        send_buffer_space_before_notify=640,
        send_buffer_notification_ip_address=None,
        send_buffer_notification_port=None,
        send_buffer_notification_tag=None):
    """
    :param n_keys: The number of keys to be sent via this multicast source
    :param resources_required: The resources required by the vertex
    :param machine_time_step: The time step to be used on the machine
    :param timescale_factor: The time scaling to be used in the simulation
    :param label: The label of this vertex
    :param constraints: Any initial constraints to this vertex
    :param board_address: The IP address of the board on which to place\
            this vertex if receiving data, either buffered or live (by\
            default, any board is chosen)
    :param receive_port: The port on the board that will listen for\
            incoming event packets (default is to disable this feature;\
            set a value to enable it)
    :param receive_sdp_port: The SDP port to listen on for incoming event\
            packets (defaults to 1)
    :param receive_tag: The IP tag to use for receiving live events\
            (uses any by default)
    :param virtual_key: The base multicast key to send received events\
            with (assigned automatically by default)
    :param prefix: The prefix to "or" with generated multicast keys\
            (default is no prefix)
    :param prefix_type: Whether the prefix should apply to the upper or\
            lower half of the multicast keys (default is upper half)
    :param check_keys: True if the keys of received events should be\
            verified before sending (default False)
    :param send_buffer_times: An array of arrays of times at which keys\
            should be sent (one array for each key, default disabled)
    :param send_buffer_max_space: The maximum amount of space to use of\
            the SDRAM on the machine (default is 1MB)
    :param send_buffer_space_before_notify: The amount of space free in\
            the sending buffer before the machine will ask the host for\
            more data (default setting is optimised for most cases)
    :param send_buffer_notification_ip_address: The IP address of the host\
            that will send new buffers (must be specified if a send buffer\
            is specified)
    :param send_buffer_notification_port: The port that the host that will\
            send new buffers is listening on (must be specified if a\
            send buffer is specified)
    :param send_buffer_notification_tag: The IP tag to use to notify the\
            host about space in the buffer (default is to use any tag)
    """
    # Set up super types
    PartitionedVertex.__init__(
        self, resources_required, label, constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    ProvidesProvenanceDataFromMachineImpl.__init__(
        self, self._REGIONS.PROVENANCE_REGION.value, 0)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    ReceiveBuffersToHostBasicImpl.__init__(self)

    # Set up for receiving live packets
    if receive_port is not None:
        self.add_constraint(TagAllocatorRequireReverseIptagConstraint(
            receive_port, receive_sdp_port, board_address, receive_tag))

    # Work out if buffers are being sent
    self._first_machine_time_step = 0
    self._send_buffer = None
    if send_buffer_times is None:
        self._send_buffer_times = None
        SendsBuffersFromHostPreBufferedImpl.__init__(
            self, None)
    else:
        self._send_buffer = BufferedSendingRegion(send_buffer_max_space)
        self._send_buffer_times = send_buffer_times

        self.add_constraint(TagAllocatorRequireIptagConstraint(
            send_buffer_notification_ip_address,
            send_buffer_notification_port, True, board_address,
            send_buffer_notification_tag))
        SendsBuffersFromHostPreBufferedImpl.__init__(
            self, {self._REGIONS.SEND_BUFFER.value: self._send_buffer})

    # buffered out parameters
    self._send_buffer_space_before_notify = send_buffer_space_before_notify
    self._send_buffer_notification_ip_address = \
        send_buffer_notification_ip_address
    self._send_buffer_notification_port = send_buffer_notification_port
    self._send_buffer_notification_tag = send_buffer_notification_tag
    if self._send_buffer_space_before_notify > send_buffer_max_space:
        self._send_buffer_space_before_notify = send_buffer_max_space

    # Set up for recording (if requested)
    self._record_buffer_size = 0
    self._buffer_size_before_receive = 0

    # set flag for checking if in injection mode
    self._in_injection_mode = receive_port is not None

    # Sort out the keys to be used
    self._n_keys = n_keys
    self._virtual_key = virtual_key
    self._mask = None
    self._prefix = prefix
    self._prefix_type = prefix_type
    self._check_keys = check_keys

    # Work out the prefix details
    if self._prefix is not None:
        if self._prefix_type is None:
            self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
        if self._prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
            self._prefix = prefix << 16

    # If the user has specified a virtual key
    if self._virtual_key is not None:

        # check that virtual key is valid
        if self._virtual_key < 0:
            raise ConfigurationException("Virtual keys must be positive")

        # Get a mask and maximum number of keys for the number of keys
        # requested
        self._mask, max_key = self._calculate_mask(n_keys)

        # Check that the number of keys and the virtual key don't
        # interfere
        if n_keys > max_key:
            raise ConfigurationException(
                "The mask calculated from the number of keys will "
                "not work with the virtual key specified")

        if self._prefix is not None:

            # Check that the prefix doesn't change the virtual key in the
            # masked area
            masked_key = (self._virtual_key | self._prefix) & self._mask
            if self._virtual_key != masked_key:
                raise ConfigurationException(
                    "The number of keys, virtual key and key prefix"
                    " settings don't work together")
        else:

            # If no prefix was generated, generate one
            self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
            self._prefix = self._virtual_key
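
# _calculate_mask is not shown in this section, but the validation above
# assumes it rounds n_keys up to a power of two and frees that many low-order
# key bits. A hedged sketch of that arithmetic (the helper name and body here
# are assumptions, not the class's actual implementation):
import math

def calculate_mask(n_keys):
    # free enough low-order bits to cover the next power of two above n_keys
    n_bits = int(math.ceil(math.log(n_keys, 2)))
    max_keys = 1 << n_bits
    return 0xFFFFFFFF & ~(max_keys - 1), max_keys

mask, max_keys = calculate_mask(100)          # 100 keys -> a 128-key block
assert (mask, max_keys) == (0xFFFFFF80, 128)
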
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        spike_times=None, port=None, tag=None, ip_address=None,
        board_address=None,
        max_on_chip_memory_usage_for_spikes_in_bytes=(
            constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
        space_before_notification=640,
        constraints=None, label="SpikeSourceArray",
        spike_recorder_buffer_size=(
            constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
        buffer_size_before_receive=(
            constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE)):
    self._ip_address = ip_address
    if ip_address is None:
        self._ip_address = config.get("Buffers", "receive_buffer_host")
    self._port = port
    if port is None:
        self._port = config.getint("Buffers", "receive_buffer_port")
    if spike_times is None:
        spike_times = []
    self._minimum_sdram_for_buffering = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")

    ReverseIpTagMultiCastSource.__init__(
        self, n_keys=n_neurons, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor, label=label,
        constraints=constraints,
        max_atoms_per_core=(
            SpikeSourceArray._model_based_max_atoms_per_core),
        board_address=board_address, receive_port=None,
        receive_sdp_port=None, receive_tag=None, virtual_key=None,
        prefix=None, prefix_type=None, check_keys=False,
        send_buffer_times=spike_times,
        send_buffer_max_space=(
            max_on_chip_memory_usage_for_spikes_in_bytes),
        send_buffer_space_before_notify=space_before_notification,
        send_buffer_notification_ip_address=self._ip_address,
        send_buffer_notification_port=self._port,
        send_buffer_notification_tag=tag)

    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractHasFirstMachineTimeStep.__init__(self)

    # handle recording
    self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)
    self._spike_recorder_buffer_size = spike_recorder_buffer_size
    self._buffer_size_before_receive = buffer_size_before_receive

    # Keep track of any previously generated buffers
    self._send_buffers = dict()
    self._spike_recording_region_size = None
    self._partitioned_vertices = list()
    self._partitioned_vertices_current_max_buffer_size = dict()

    # used for reset and rerun
    self._requires_mapping = True
    self._last_runtime_position = 0

    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._space_before_notification = space_before_notification
    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = \
            front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

    # check the values do not conflict with chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise exceptions.ConfigurationException(
            "The memory usage on chip is either beyond what is supportable"
            " on the spinnaker board being supported or you have requested"
            " a negative value for a memory usage. Please correct and"
            " try again")

    if (self._max_on_chip_memory_usage_for_spikes <
            self._space_before_notification):
        self._space_before_notification = \
            self._max_on_chip_memory_usage_for_spikes
def __init__(self, fixed_key, spinnaker_link_id, machine_time_step,
             timescale_factor, label=None, n_neurons=None,
             polarity=PushBotRetinaPolarity.Merged,
             resolution=PushBotRetinaResolution.Downsample64):
    # Validate the polarity and resolution enumeration values
    if not isinstance(polarity, PushBotRetinaPolarity):
        raise exceptions.SpynnakerException(
            "Pushbot retina polarity should be one of those defined in"
            " Polarity enumeration")
    if not isinstance(resolution, PushBotRetinaResolution):
        raise exceptions.SpynnakerException(
            "Pushbot retina resolution should be one of those defined in"
            " Resolution enumeration")

    # Cache resolution
    self._resolution = resolution

    # Build standard routing key from virtual chip coordinates
    self._routing_key = fixed_key
    self._retina_source_key = self._routing_key

    # Calculate number of neurons
    fixed_n_neurons = resolution.value.pixels ** 2

    # If polarity is merged
    if polarity == PushBotRetinaPolarity.Merged:

        # Double number of neurons
        fixed_n_neurons *= 2

        # We need to mask out two coordinates and a polarity bit
        mask_bits = (2 * resolution.value.coordinate_bits) + 1
    # Otherwise
    else:
        # We need to mask out two coordinates
        mask_bits = 2 * resolution.value.coordinate_bits

        # If polarity is up, set polarity bit in routing key
        if polarity == PushBotRetinaPolarity.Up:
            polarity_bit = 1 << (2 * resolution.value.coordinate_bits)
            self._routing_key |= polarity_bit

    # Build routing mask
    self._routing_mask = ~((1 << mask_bits) - 1) & 0xFFFFFFFF

    AbstractVirtualVertex.__init__(
        self, fixed_n_neurons, spinnaker_link_id,
        max_atoms_per_core=fixed_n_neurons, label=label)
    AbstractSendMeMulticastCommandsVertex.__init__(
        self, self._get_commands())
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)

    if n_neurons != fixed_n_neurons and n_neurons is not None:
        print "Warning, the retina will have {} neurons".format(
            fixed_n_neurons)
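
# A standalone recomputation of the routing mask above for merged polarity,
# assuming Downsample64 means a 64 x 64 pixel array with 6 coordinate bits
# per axis (the enum's fields are not shown in this section):
coordinate_bits = 6                        # 64 = 2 ** 6 pixels per axis
mask_bits = (2 * coordinate_bits) + 1      # x, y and the polarity bit
routing_mask = ~((1 << mask_bits) - 1) & 0xFFFFFFFF
assert routing_mask == 0xFFFFE000
assert 1 << mask_bits == 64 * 64 * 2       # one key per pixel/polarity pair
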
def __init__(self, n_neurons, binary, label, max_atoms_per_core,
             machine_time_step, timescale_factor, spikes_per_second,
             ring_buffer_sigma, incoming_spike_buffer_size, model_name,
             neuron_model, input_type, synapse_type, threshold_type,
             additional_input=None, constraints=None):
    AbstractPartitionableVertex.__init__(
        self, n_neurons, label, max_atoms_per_core, constraints)
    AbstractDataSpecableVertex.__init__(
        self, machine_time_step, timescale_factor)
    AbstractSpikeRecordable.__init__(self)
    AbstractVRecordable.__init__(self)
    AbstractGSynRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractProvidesIncomingPartitionConstraints.__init__(self)
    AbstractPopulationInitializable.__init__(self)
    AbstractPopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)

    self._binary = binary
    self._label = label
    self._machine_time_step = machine_time_step
    self._timescale_factor = timescale_factor
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")

    self._model_name = model_name
    self._neuron_model = neuron_model
    self._input_type = input_type
    self._threshold_type = threshold_type
    self._additional_input = additional_input

    # Set up for recording
    self._spike_recorder = SpikeRecorder(machine_time_step)
    self._v_recorder = VRecorder(machine_time_step)
    self._gsyn_recorder = GsynRecorder(machine_time_step)
    self._spike_buffer_max_size = config.getint(
        "Buffers", "spike_buffer_size")
    self._v_buffer_max_size = config.getint(
        "Buffers", "v_buffer_size")
    self._gsyn_buffer_max_size = config.getint(
        "Buffers", "gsyn_buffer_size")
    self._buffer_size_before_receive = config.getint(
        "Buffers", "buffer_size_before_receive")
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = config.getint(
        "Buffers", "receive_buffer_port")
    self._enable_buffered_recording = config.getboolean(
        "Buffers", "enable_buffered_recording")

    # Set up synapse handling
    self._synapse_manager = SynapticManager(
        synapse_type, machine_time_step, ring_buffer_sigma,
        spikes_per_second)

    # bool for if state has changed.
    self._change_requires_mapping = True
def __init__(self, n_neurons,
             spike_times=default_parameters['spike_times'],
             port=none_pynn_default_parameters['port'],
             tag=none_pynn_default_parameters['tag'],
             ip_address=none_pynn_default_parameters['ip_address'],
             board_address=none_pynn_default_parameters['board_address'],
             max_on_chip_memory_usage_for_spikes_in_bytes=DEFAULT1,
             space_before_notification=none_pynn_default_parameters[
                 'space_before_notification'],
             constraints=none_pynn_default_parameters['constraints'],
             label=none_pynn_default_parameters['label'],
             spike_recorder_buffer_size=none_pynn_default_parameters[
                 'spike_recorder_buffer_size'],
             buffer_size_before_receive=none_pynn_default_parameters[
                 'buffer_size_before_receive']):
    config = globals_variables.get_simulator().config
    self._ip_address = ip_address
    if ip_address is None:
        self._ip_address = config.get("Buffers", "receive_buffer_host")
    self._port = port
    if port is None:
        self._port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")
    if spike_times is None:
        spike_times = []

    ReverseIpTagMultiCastSource.__init__(
        self, n_keys=n_neurons, label=label, constraints=constraints,
        max_atoms_per_core=(
            SpikeSourceArray._model_based_max_atoms_per_core),
        board_address=board_address, receive_port=None, receive_tag=None,
        virtual_key=None, prefix=None, prefix_type=None, check_keys=False,
        send_buffer_times=spike_times,
        send_buffer_partition_id=constants.SPIKE_PARTITION_ID,
        send_buffer_max_space=(
            max_on_chip_memory_usage_for_spikes_in_bytes),
        send_buffer_space_before_notify=space_before_notification,
        buffer_notification_ip_address=self._ip_address,
        buffer_notification_port=self._port,
        buffer_notification_tag=tag)

    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    ProvidesKeyToAtomMappingImpl.__init__(self)

    # handle recording
    self._spike_recorder = EIEIOSpikeRecorder()
    self._spike_recorder_buffer_size = spike_recorder_buffer_size
    self._buffer_size_before_receive = buffer_size_before_receive

    # Keep track of any previously generated buffers
    self._send_buffers = dict()
    self._spike_recording_region_size = None
    self._machine_vertices = list()

    # used for reset and rerun
    self._requires_mapping = True
    self._last_runtime_position = 0

    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._space_before_notification = space_before_notification
    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = \
            front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

    # check the values do not conflict with chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise exceptions.ConfigurationException(
            "The memory usage on chip is either beyond what is supportable"
            " on the spinnaker board being supported or you have requested"
            " a negative value for a memory usage. Please correct and"
            " try again")

    if (self._max_on_chip_memory_usage_for_spikes <
            self._space_before_notification):
        self._space_before_notification = \
            self._max_on_chip_memory_usage_for_spikes
def __init__(self, x_factor=X_FACTOR, y_factor=Y_FACTOR,
             width=WIDTH_PIXELS, height=HEIGHT_PIXELS,
             colour_bits=COLOUR_BITS, constraints=None, label="Breakout",
             incoming_spike_buffer_size=None,
             simulation_duration_ms=MAX_SIM_DURATION, bricking=1,
             random_seed=rand_seed):
    self._label = label
    self._x_factor = x_factor
    self._y_factor = y_factor
    self._width = width / x_factor
    self._height = height / y_factor
    self._colour_bits = colour_bits
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(self._width)))
    self._height_bits = numpy.uint32(numpy.ceil(numpy.log2(self._height)))

    self._n_neurons = int(
        1 << (self._width_bits + self._height_bits + self._colour_bits))

    self._bricking = bricking
    self._rand_seed = random_seed

    # print self._rand_seed
    # print "# width =", self._width
    # print "# width bits =", self._width_bits
    # print "# height =", self._height
    # print "# height bits =", self._height_bits
    # print "# neurons =", self._n_neurons

    # Define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # (static) resources required
    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.BREAKOUT_REGION_BYTES + self.PARAM_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Breakout, self).__init__(
        BreakoutMachineVertex(
            vertex_slice, resources_required, constraints, self._label,
            self, x_factor, y_factor, width, height, colour_bits,
            incoming_spike_buffer_size, simulation_duration_ms, bricking,
            random_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True
    # store the given buffer size, falling back to the config default
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
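
# The neuron count above rounds each key field (x, y, colour) up to a whole
# number of bits. A worked run of that arithmetic with illustrative values
# (the real WIDTH_PIXELS / HEIGHT_PIXELS / COLOUR_BITS constants are not
# shown in this section):
import numpy

width, height, x_factor, y_factor, colour_bits = 160, 128, 2, 2, 2
w, h = width // x_factor, height // y_factor        # 80 x 64 game frame
width_bits = int(numpy.ceil(numpy.log2(w)))         # 7 (80 rounds up to 128)
height_bits = int(numpy.ceil(numpy.log2(h)))        # 6 (64 is exact)
n_neurons = 1 << (width_bits + height_bits + colour_bits)
assert n_neurons == 32768                           # 2 ** 15 key values
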