def __init__(self, arms=default_parameters['arms'],
             reward_delay=default_parameters['reward_delay'],
             reward_based=default_parameters['reward_based'],
             rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             stochastic=default_parameters['stochastic'],
             constant_input=default_parameters['constant_input'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label

    # Pass in variables
    self._arms = arms
    self._no_arms = len(arms)
    self._n_neurons = self._no_arms
    self._rand_seed = rand_seed
    self._reward_delay = reward_delay
    self._reward_based = reward_based
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._constant_input = constant_input

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    resources_required = (
        self.BANDIT_REGION_BYTES + self.BASE_ARMS_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Bandit, self).__init__(
        BanditMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            arms, reward_delay, reward_based, rate_on, rate_off,
            stochastic, constant_input, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
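# Illustrative sketch (not from the original source): the recording region
# above is sized at 4 bytes per simulated second, via
# int((simulation_duration_ms / 1000.) * 4). The helper name below is
# hypothetical, purely to demonstrate the arithmetic.
def _example_recording_size(simulation_duration_ms):
    # 4 bytes of recording space per simulated second
    return int((simulation_duration_ms / 1000.) * 4)

assert _example_recording_size(120 * 1000) == 480  # a 2-minute run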
def __init__(self, arms=default_parameters['arms'],
             reward_delay=default_parameters['reward_delay'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration']):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._arms = arms
    self._no_arms = len(arms)
    self._n_neurons = self._no_arms
    self._reward_delay = reward_delay

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons,
             constraints=none_pynn_default_parameters['constraints'],
             label=none_pynn_default_parameters['label'],
             rate=default_parameters['rate'],
             start=default_parameters['start'],
             duration=default_parameters['duration'],
             seed=none_pynn_default_parameters['seed']):
    ApplicationVertex.__init__(
        self, label, constraints, self._model_based_max_atoms_per_core)
    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    SimplePopulationSettable.__init__(self)
    ProvidesKeyToAtomMappingImpl.__init__(self)

    config = globals_variables.get_simulator().config

    # atoms params
    self._n_atoms = n_neurons
    self._seed = seed

    # flags to track changes to parameters
    self._change_requires_mapping = True
    self._change_requires_neuron_parameters_reload = False

    # Store the parameters, expanded to one value per neuron
    self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
    self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
    self._duration = utility_calls.convert_param_to_numpy(
        duration, n_neurons)
    self._time_to_spike = utility_calls.convert_param_to_numpy(
        0, n_neurons)
    self._rng = numpy.random.RandomState(seed)
    self._machine_time_step = None

    # Prepare for recording, and to get spikes
    self._spike_recorder = MultiSpikeRecorder()
    self._time_between_requests = config.getint(
        "Buffers", "time_between_requests")
    self._receive_buffer_host = config.get(
        "Buffers", "receive_buffer_host")
    self._receive_buffer_port = helpful_functions.read_config_int(
        config, "Buffers", "receive_buffer_port")
    self._minimum_buffer_sdram = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")

    spike_buffer_max_size = 0
    self._buffer_size_before_receive = None
    if config.getboolean("Buffers", "enable_buffered_recording"):
        spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")

    self._maximum_sdram_for_buffering = [spike_buffer_max_size]
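# Illustrative sketch (not from the original source): the constructor above
# expands each parameter to one value per neuron via
# utility_calls.convert_param_to_numpy. A hypothetical equivalent, assuming
# that helper broadcasts scalars and passes arrays through:
import numpy

def _example_expand(param, n_neurons):
    # broadcast a scalar to n_neurons entries; leave arrays unchanged
    arr = numpy.asarray(param, dtype=float)
    return numpy.full(n_neurons, arr) if arr.ndim == 0 else arr

assert list(_example_expand(10.0, 3)) == [10.0, 10.0, 10.0]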
def __init__(self, n_neurons, width=WIDTH_PIXELS, height=HEIGHT_PIXELS,
             colour_bits=COLOUR_BITS, constraints=None, label="Bandit",
             incoming_spike_buffer_size=None,
             simulation_duration_ms=MAX_SIM_DURATION):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._width = width
    self._height = height
    self._colour_bits = colour_bits
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(width)))
    self._height_bits = numpy.uint32(numpy.ceil(numpy.log2(height)))

    self._n_neurons = (
        1 << (self._width_bits + self._height_bits +
              self._colour_bits + 1))

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
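# Illustrative sketch (not from the original source): the atom count above
# is a power of two built from the bit-widths of the frame dimensions plus
# the colour bits (and one extra bit). For a hypothetical 160x128 frame
# with 2 colour bits:
import numpy

_w_bits = int(numpy.ceil(numpy.log2(160)))  # 8
_h_bits = int(numpy.ceil(numpy.log2(128)))  # 7
assert 1 << (_w_bits + _h_bits + 2 + 1) == 262144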
def __init__(self, rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             pop_size=default_parameters['pop_size'],
             prob_command=default_parameters['prob_command'],
             prob_in_change=default_parameters['prob_in_change'],
             time_period=default_parameters['time_period'],
             stochastic=default_parameters['stochastic'],
             reward=default_parameters['reward'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label

    # Pass in variables
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._reward = reward
    self._pop_size = pop_size
    self._prob_command = prob_command
    self._prob_in_change = prob_in_change
    self._n_neurons = pop_size * 4
    self._rand_seed = rand_seed
    self._time_period = time_period

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # technically, as this uses OneAppOneMachine, this may not be necessary
    resources_required = (
        self.RECALL_REGION_BYTES + self.DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Recall, self).__init__(
        RecallMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            rate_on, rate_off, pop_size, prob_command, prob_in_change,
            time_period, stochastic, reward, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        spike_times=None, port=None, tag=None, ip_address=None,
        board_address=None,
        max_on_chip_memory_usage_for_spikes_in_bytes=(
            constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
        space_before_notification=640,
        constraints=None, label="SpikeSourceArray",
        spike_recorder_buffer_size=(
            constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
        buffer_size_before_receive=(
            constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE)):
    self._ip_address = ip_address
    if ip_address is None:
        self._ip_address = config.get("Buffers", "receive_buffer_host")
    self._port = port
    if port is None:
        self._port = config.getint("Buffers", "receive_buffer_port")
    if spike_times is None:
        spike_times = []

    ReverseIpTagMultiCastSource.__init__(
        self, n_keys=n_neurons, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor, label=label,
        constraints=constraints,
        max_atoms_per_core=(
            SpikeSourceArray._model_based_max_atoms_per_core),
        board_address=board_address, receive_port=None,
        receive_sdp_port=None, receive_tag=None, virtual_key=None,
        prefix=None, prefix_type=None, check_keys=False,
        send_buffer_times=spike_times,
        send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
        send_buffer_space_before_notify=space_before_notification,
        send_buffer_notification_ip_address=self._ip_address,
        send_buffer_notification_port=self._port,
        send_buffer_notification_tag=tag)

    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingEdgeConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractMappable.__init__(self)
    AbstractHasFirstMachineTimeStep.__init__(self)

    # handle recording
    self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)
    self._spike_recorder_buffer_size = spike_recorder_buffer_size
    self._buffer_size_before_receive = buffer_size_before_receive

    # Keep track of any previously generated buffers
    self._send_buffers = dict()
    self._spike_recording_region_size = None
    self._partitioned_vertices = list()
    self._partitioned_vertices_current_max_buffer_size = dict()

    # used for reset and rerun
    self._requires_mapping = True
    self._last_runtime_position = 0

    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._space_before_notification = space_before_notification
    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = \
            front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

    # check the values do not conflict with the chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise exceptions.ConfigurationException(
            "The requested on-chip memory usage for spikes is either"
            " negative or beyond what the SpiNNaker board can support."
            " Please correct and try again")

    if (self._max_on_chip_memory_usage_for_spikes <
            self._space_before_notification):
        self._space_before_notification = \
            self._max_on_chip_memory_usage_for_spikes
def __init__(self, constraints=default_parameters['constraints'],
             encoding=default_parameters['encoding'],
             time_increment=default_parameters['time_increment'],
             pole_length=default_parameters['pole_length'],
             pole_angle=default_parameters['pole_angle'],
             pole2_length=default_parameters['pole2_length'],
             pole2_angle=default_parameters['pole2_angle'],
             reward_based=default_parameters['reward_based'],
             force_increments=default_parameters['force_increments'],
             max_firing_rate=default_parameters['max_firing_rate'],
             number_of_bins=default_parameters['number_of_bins'],
             central=default_parameters['central'],
             rand_seed=default_parameters['rand_seed'],
             bin_overlap=default_parameters['bin_overlap'],
             tau_force=default_parameters['tau_force'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration']):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label
    self._encoding = encoding

    # Pass in variables
    self._pole_length = pole_length
    self._pole_angle = pole_angle
    self._pole2_length = pole2_length
    self._pole2_angle = pole2_angle
    self._force_increments = force_increments
    # for rate based it's only 1 neuron per metric
    # (position, angle, velocity of both)
    self._n_neurons = 6 * number_of_bins
    self._time_increment = time_increment
    self._reward_based = reward_based
    self._max_firing_rate = max_firing_rate
    self._number_of_bins = number_of_bins
    self._central = central
    self._rand_seed = rand_seed
    self._bin_overlap = bin_overlap
    self._tau_force = tau_force

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # technically, as this uses OneAppOneMachine, this may not be necessary
    resources_required = (
        self.PENDULUM_REGION_BYTES + self.BASE_DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(DoublePendulum, self).__init__(
        DoublePendulumMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            encoding, time_increment, pole_length, pole_angle,
            pole2_length, pole2_angle, reward_based, force_increments,
            max_firing_rate, number_of_bins, central, bin_overlap,
            tau_force, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, truth_table, input_sequence,
             rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             score_delay=default_parameters['score_delay'],
             stochastic=default_parameters['stochastic'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label

    # Pass in variables
    self._truth_table = truth_table
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._input_sequence = input_sequence
    self._no_inputs = len(input_sequence)

    # the truth table must have one entry per combination of the inputs;
    # previously this raised and immediately swallowed the error, which
    # let execution continue with an incompatible table
    if self._no_inputs != numpy.log2(len(self._truth_table)):
        raise Bad_Table('table and input sequence are not compatible')

    self._n_neurons = self._no_inputs
    self._rand_seed = rand_seed
    self._score_delay = score_delay

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # (static) resources required
    # technically, as this uses OneAppOneMachine, this may not be necessary
    resources_required = (
        self.LOGIC_REGION_BYTES + self.BASE_DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Logic, self).__init__(
        LogicMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            truth_table, input_sequence, rate_on, rate_off, score_delay,
            stochastic, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
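# Illustrative sketch (not from the original source) of the compatibility
# check above: a truth table needs one entry per combination of its inputs,
# i.e. len(truth_table) == 2 ** len(input_sequence). XOR, for example:
import numpy

_truth_table = [0, 1, 1, 0]  # XOR outputs for inputs 00, 01, 10, 11
_input_sequence = [0, 1]     # two input channels
assert len(_input_sequence) == numpy.log2(len(_truth_table))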
def __init__(self):
    AbstractMappable.__init__(self)
    SimplePopulationSettable.__init__(self)
    self._change_requires_mapping = True
def set_value(self, key, value):
    SimplePopulationSettable.set_value(self, key, value)
    self._change_requires_mapping = True
def __init__(self, n_neurons,
             spikes_per_second=AbstractPopulationVertex.
             none_pynn_default_parameters['spikes_per_second'],
             ring_buffer_sigma=AbstractPopulationVertex.
             none_pynn_default_parameters['ring_buffer_sigma'],
             incoming_spike_buffer_size=None, constraints=None,
             label="Convolution core", src_width=WIDTH_PIXELS,
             src_height=HEIGHT_PIXELS, src_polarity_bits=POLARITY_BITS,
             polarity=POLARITY, sample_step_width=SAMPLE_STEP_WIDTH,
             sample_step_height=SAMPLE_STEP_HEIGHT, kernel=KERNEL,
             threshold=THRESHOLD,
             use_xyp_or_pyx=KEY_FORMATS.USE_XYP.value,
             time_window=TIME_WINDOW):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless

    # the numbered comments count the scalar parameters that contribute
    # to the memory size computed below
    self._width = numpy.uint32(src_width)                              # 1
    self._height = numpy.uint32(src_height)                           # 2
    self._polarity_bits = numpy.uint32(src_polarity_bits)             # 3
    self._width_bits = numpy.uint32(
        numpy.ceil(numpy.log2(src_width)))                            # 4
    self._height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(src_height)))                           # 5

    self._kernel_width = numpy.uint32(kernel.shape[1])                # 6
    self._kernel_height = numpy.uint32(kernel.shape[0])               # 7
    self._step_width = numpy.uint32(
        numpy.round(numpy.log2(sample_step_width)))                   # 8
    self._step_height = numpy.uint32(
        numpy.round(numpy.log2(sample_step_height)))                  # 9
    self._threshold = numpy.float16(threshold)                        # 10
    self._use_xyp_or_pyx = numpy.uint32(use_xyp_or_pyx)               # 11
    self._time_window = numpy.uint32(time_window)                     # 12

    self._start_width = kernel.shape[1] // 2
    self._start_height = kernel.shape[0] // 2
    self._out_width = subsamp_size(
        self._start_width, self._width, 1 << self._step_width)        # 13
    self._out_height = subsamp_size(
        self._start_height, self._height, 1 << self._step_height)     # 14
    self._out_width_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_width)))                      # 15
    self._out_height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_height)))                     # 16

    self._polarity = polarity                                         # 17

    self._n_neurons = (
        1 << (self._out_width_bits + self._out_height_bits +
              self._polarity_bits))

    self._kernel = numpy.float16(kernel)

    # memory: 17 numbered parameters, plus the kernel, plus one key,
    # at 4 bytes per word
    self._memory_size_in_bytes = (17 + kernel.size + 1) * 4

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
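# Illustrative sketch (not from the original source): the SDRAM estimate
# above is one 4-byte word for each of the 17 numbered parameters, each
# kernel element, and the routing key. For a hypothetical 5x5 kernel:
import numpy

_kernel = numpy.ones((5, 5))
assert (17 + _kernel.size + 1) * 4 == 172  # bytes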
def __init__(self, x_factor=X_FACTOR, y_factor=Y_FACTOR,
             width=WIDTH_PIXELS, height=HEIGHT_PIXELS,
             colour_bits=COLOUR_BITS, constraints=None, label="Breakout",
             incoming_spike_buffer_size=None,
             simulation_duration_ms=MAX_SIM_DURATION, bricking=1,
             random_seed=rand_seed):
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label
    self._x_factor = x_factor
    self._y_factor = y_factor
    self._width = width / x_factor
    self._height = height / y_factor
    self._colour_bits = colour_bits
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(self._width)))
    self._height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._height)))

    self._n_neurons = int(
        1 << (self._width_bits + self._height_bits + self._colour_bits))
    self._bricking = bricking
    self._rand_seed = random_seed

    # print self._rand_seed
    # print "# width =", self._width
    # print "# width bits =", self._width_bits
    # print "# height =", self._height
    # print "# height bits =", self._height_bits
    # print "# neurons =", self._n_neurons

    # Define size of recording region
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # (static) resources required
    # technically, as this uses OneAppOneMachine, this may not be necessary
    resources_required = (
        self.BREAKOUT_REGION_BYTES + self.PARAM_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Breakout, self).__init__(
        BreakoutMachineVertex(
            vertex_slice, resources_required, constraints, self._label,
            self, x_factor, y_factor, width, height, colour_bits,
            incoming_spike_buffer_size, simulation_duration_ms,
            bricking, random_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons,
             spike_times=default_parameters['spike_times'],
             port=none_pynn_default_parameters['port'],
             tag=none_pynn_default_parameters['tag'],
             ip_address=none_pynn_default_parameters['ip_address'],
             board_address=none_pynn_default_parameters['board_address'],
             max_on_chip_memory_usage_for_spikes_in_bytes=DEFAULT1,
             space_before_notification=none_pynn_default_parameters[
                 'space_before_notification'],
             constraints=none_pynn_default_parameters['constraints'],
             label=none_pynn_default_parameters['label'],
             spike_recorder_buffer_size=none_pynn_default_parameters[
                 'spike_recorder_buffer_size'],
             buffer_size_before_receive=none_pynn_default_parameters[
                 'buffer_size_before_receive']):
    config = globals_variables.get_simulator().config
    self._ip_address = ip_address
    if ip_address is None:
        self._ip_address = config.get("Buffers", "receive_buffer_host")
    self._port = port
    if port is None:
        self._port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")
    if spike_times is None:
        spike_times = []

    ReverseIpTagMultiCastSource.__init__(
        self, n_keys=n_neurons, label=label, constraints=constraints,
        max_atoms_per_core=(
            SpikeSourceArray._model_based_max_atoms_per_core),
        board_address=board_address, receive_port=None,
        receive_tag=None, virtual_key=None, prefix=None,
        prefix_type=None, check_keys=False,
        send_buffer_times=spike_times,
        send_buffer_partition_id=constants.SPIKE_PARTITION_ID,
        send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
        send_buffer_space_before_notify=space_before_notification,
        buffer_notification_ip_address=self._ip_address,
        buffer_notification_port=self._port,
        buffer_notification_tag=tag)

    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    ProvidesKeyToAtomMappingImpl.__init__(self)

    # handle recording
    self._spike_recorder = EIEIOSpikeRecorder()
    self._spike_recorder_buffer_size = spike_recorder_buffer_size
    self._buffer_size_before_receive = buffer_size_before_receive

    # Keep track of any previously generated buffers
    self._send_buffers = dict()
    self._spike_recording_region_size = None
    self._machine_vertices = list()

    # used for reset and rerun
    self._requires_mapping = True
    self._last_runtime_position = 0

    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._space_before_notification = space_before_notification
    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = \
            front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

    # check the values do not conflict with the chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise exceptions.ConfigurationException(
            "The requested on-chip memory usage for spikes is either"
            " negative or beyond what the SpiNNaker board can support."
            " Please correct and try again")

    if (self._max_on_chip_memory_usage_for_spikes <
            self._space_before_notification):
        self._space_before_notification = \
            self._max_on_chip_memory_usage_for_spikes
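# Illustrative sketch (not from the original source) of the buffer-limit
# checks above: a negative on-chip limit is rejected, and the notification
# threshold is clamped so it never exceeds the buffer itself. The helper
# name is hypothetical.
def _example_clamp(max_on_chip_bytes, space_before_notify):
    if max_on_chip_bytes < 0:
        raise ValueError("negative on-chip memory limit")
    return min(space_before_notify, max_on_chip_bytes)

assert _example_clamp(512, 640) == 512   # threshold clamped to buffer size
assert _example_clamp(1024, 640) == 640  # threshold already fits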
def set_value(self, key, value):
    SimplePopulationSettable.set_value(self, key, value)
    self._remapping_required = True
def __init__(self, n_neurons, constraints, label, model, profile,
             time_scale_factor):
    # Superclasses
    ApplicationVertex.__init__(self, label, constraints)
    AbstractAcceptsIncomingSynapses.__init__(self)
    SimplePopulationSettable.__init__(self)
    HandOverToVertex.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractSpikeRecordable.__init__(self)
    AbstractNeuronRecordable.__init__(self)
    AbstractProvidesNKeysForPartition.__init__(self)

    self._model = model
    self._profile = profile
    self._remapping_required = True
    self._synapse_dynamics = None
    self._n_fibres_per_ihc = None
    self._n_group_tree_rows = None
    self._ihcan_vertices = list()
    self._drnl_vertices = list()
    self._final_agg_vertices = list()
    self.__synapse_manager = SynapticManager(
        self.N_SYNAPSE_TYPES, None, None,
        globals_variables.get_simulator().config)

    # calculate n fibres per ihcan core
    sample_time = time_scale_factor / self._model.fs

    # how many channels
    self._n_channels = int(
        self.get_out_going_size() / self._model.n_fibres_per_ihc)

    # process pole freqs
    self._pole_freqs = self._process_pole_freqs()

    # how many fibres / atoms ran on each ihcan core
    self._n_fibres_per_ihcan_core = self.fibres_per_ihcan_core(
        sample_time, self._model.n_fibres_per_ihc)

    # process all the other internal numbers
    atoms_per_row = self.process_internal_numbers()

    # read in param file if needed
    self._process_param_file(atoms_per_row)

    # set up recording
    self._drnl_neuron_recorder = NeuronRecorder(
        DRNLMachineVertex.RECORDABLES,
        DRNLMachineVertex.get_matrix_scalar_data_types(),
        DRNLMachineVertex.get_matrix_output_data_types(),
        self._n_dnrls)

    self._ihcan_neuron_recorder = NeuronRecorder(
        IHCANMachineVertex.RECORDABLES,
        IHCANMachineVertex.get_matrix_scalar_data_types(),
        IHCANMachineVertex.get_matrix_output_data_types(),
        self._n_dnrls * self._n_fibres_per_ihc)

    # flags for whether state has changed
    self._change_requires_mapping = True
    self._change_requires_neuron_parameters_reload = False
    self._change_requires_data_generation = False
    self._has_reset_last = True

    # safety check
    if self._n_atoms != n_neurons:
        raise ConfigurationException(
            self.N_NEURON_ERROR.format(n_neurons, self._n_atoms))

    # safety check on the sampling-rate-to-time-scale ratio
    if (self._model.fs / time_scale_factor >
            self.MAX_TIME_SCALE_FACTOR_RATIO):
        raise Exception(self.FREQUENCY_ERROR)

    # write timer period
    self._timer_period = (
        MICRO_TO_SECOND_CONVERSION *
        (self._model.seq_size / self._model.fs))
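# Illustrative sketch (not from the original source): the timer period above
# converts one sequence of audio samples into microseconds. Assuming the
# imported constant MICRO_TO_SECOND_CONVERSION is 1e6, eight samples at
# fs = 22050 Hz give roughly 363 us per tick.
_MICRO_TO_SECOND_CONVERSION = 1e6  # assumed value of the imported constant
_timer_period_us = _MICRO_TO_SECOND_CONVERSION * (8 / 22050.0)
assert 362 < _timer_period_us < 364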
def __init__(
        self, n_neurons, machine_time_step, timescale_factor,
        spike_times=None, port=None, tag=None, ip_address=None,
        board_address=None,
        max_on_chip_memory_usage_for_spikes_in_bytes=(
            constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
        space_before_notification=640,
        constraints=None, label="SpikeSourceArray",
        spike_recorder_buffer_size=(
            constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
        buffer_size_before_receive=(
            constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE)):
    self._ip_address = ip_address
    if ip_address is None:
        self._ip_address = config.get("Buffers", "receive_buffer_host")
    self._port = port
    if port is None:
        self._port = config.getint("Buffers", "receive_buffer_port")
    if spike_times is None:
        spike_times = []
    self._minimum_sdram_for_buffering = config.getint(
        "Buffers", "minimum_buffer_sdram")
    self._using_auto_pause_and_resume = config.getboolean(
        "Buffers", "use_auto_pause_and_resume")

    ReverseIpTagMultiCastSource.__init__(
        self, n_keys=n_neurons, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor, label=label,
        constraints=constraints,
        max_atoms_per_core=(
            SpikeSourceArray._model_based_max_atoms_per_core),
        board_address=board_address, receive_port=None,
        receive_sdp_port=None, receive_tag=None, virtual_key=None,
        prefix=None, prefix_type=None, check_keys=False,
        send_buffer_times=spike_times,
        send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
        send_buffer_space_before_notify=space_before_notification,
        send_buffer_notification_ip_address=self._ip_address,
        send_buffer_notification_port=self._port,
        send_buffer_notification_tag=tag)

    AbstractSpikeRecordable.__init__(self)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractHasFirstMachineTimeStep.__init__(self)

    # handle recording
    self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)
    self._spike_recorder_buffer_size = spike_recorder_buffer_size
    self._buffer_size_before_receive = buffer_size_before_receive

    # Keep track of any previously generated buffers
    self._send_buffers = dict()
    self._spike_recording_region_size = None
    self._partitioned_vertices = list()
    self._partitioned_vertices_current_max_buffer_size = dict()

    # used for reset and rerun
    self._requires_mapping = True
    self._last_runtime_position = 0

    self._max_on_chip_memory_usage_for_spikes = \
        max_on_chip_memory_usage_for_spikes_in_bytes
    self._space_before_notification = space_before_notification
    if self._max_on_chip_memory_usage_for_spikes is None:
        self._max_on_chip_memory_usage_for_spikes = \
            front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

    # check the values do not conflict with the chip memory limit
    if self._max_on_chip_memory_usage_for_spikes < 0:
        raise exceptions.ConfigurationException(
            "The requested on-chip memory usage for spikes is either"
            " negative or beyond what the SpiNNaker board can support."
            " Please correct and try again")

    if (self._max_on_chip_memory_usage_for_spikes <
            self._space_before_notification):
        self._space_before_notification = \
            self._max_on_chip_memory_usage_for_spikes
def set_value(self, key, value):
    SimplePopulationSettable.set_value(self, key, value)
    self._change_requires_neuron_parameters_reload = True
def __init__(self):
    AbstractChangableAfterRun.__init__(self)
    SimplePopulationSettable.__init__(self)
    self._change_requires_mapping = True