def __init__(self, arms=default_parameters['arms'],
             reward_delay=default_parameters['reward_delay'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration']):
    """Build the application vertex for the multi-armed bandit model.

    :param arms: per-arm reward description; only its length is used
        here (one neuron per arm)
    :param reward_delay: delay before reward delivery -- units not
        visible here, confirm against the machine model
    :param constraints: placement constraints passed to the superclass
    :param label: vertex label
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param simulation_duration_ms: run length used to size recording
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._arms = arms
    self._no_arms = len(arms)
    self._n_neurons = self._no_arms
    self._reward_delay = reward_delay

    # used to define size of recording region
    # NOTE(review): sibling constructors divide by 1000. here; the
    # 10000. divisor may be intentional but looks inconsistent - confirm
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config
    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, arms=default_parameters['arms'],
             reward_delay=default_parameters['reward_delay'],
             reward_based=default_parameters['reward_based'],
             rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             stochastic=default_parameters['stochastic'],
             constant_input=default_parameters['constant_input'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    """Build the Bandit application vertex and its machine vertex.

    :param arms: per-arm reward description; one neuron per arm
    :param reward_delay: delay before reward delivery
    :param reward_based: whether scoring is reward based
    :param rate_on: firing rate for the "on" state
    :param rate_off: firing rate for the "off" state
    :param stochastic: whether outputs are stochastic
    :param constant_input: whether input is constant
    :param constraints: placement constraints
    :param label: vertex label
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param simulation_duration_ms: run length used to size recording
    :param rand_seed: seed forwarded to the machine vertex
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label

    # Pass in variables
    self._arms = arms
    self._no_arms = len(arms)
    self._n_neurons = self._no_arms
    self._rand_seed = rand_seed
    self._reward_delay = reward_delay
    self._reward_based = reward_based
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._constant_input = constant_input

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    resources_required = (
        self.BANDIT_REGION_BYTES + self.BASE_ARMS_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Bandit, self).__init__(
        BanditMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            arms, reward_delay, reward_based, rate_on, rate_off,
            stochastic, constant_input, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons, width=WIDTH_PIXELS, height=HEIGHT_PIXELS,
             colour_bits=COLOUR_BITS, constraints=None,
             label="Bandit", incoming_spike_buffer_size=None,
             simulation_duration_ms=MAX_SIM_DURATION):
    """Build a frame-based application vertex keyed by (x, y, colour).

    :param n_neurons: ignored (see NOTE below)
    :param width: frame width in pixels
    :param height: frame height in pixels
    :param colour_bits: number of bits encoding colour in the key
    :param constraints: placement constraints passed to the superclass
    :param label: vertex label
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param simulation_duration_ms: run length used to size recording
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._width = width
    self._height = height
    self._colour_bits = colour_bits
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(width)))
    self._height_bits = numpy.uint32(numpy.ceil(numpy.log2(height)))

    # one key bit per (x, y, colour) plus one extra bit
    self._n_neurons = (
        1 << (self._width_bits + self._height_bits +
              self._colour_bits + 1))

    # used to define size of recording region
    # NOTE(review): sibling constructors divide by 1000. here; the
    # 10000. divisor may be intentional but looks inconsistent - confirm
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config
    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             pop_size=default_parameters['pop_size'],
             prob_command=default_parameters['prob_command'],
             prob_in_change=default_parameters['prob_in_change'],
             time_period=default_parameters['time_period'],
             stochastic=default_parameters['stochastic'],
             reward=default_parameters['reward'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    """Build the Recall application vertex and its machine vertex.

    :param rate_on: firing rate for the "on" state
    :param rate_off: firing rate for the "off" state
    :param pop_size: neurons per sub-population (4 sub-populations)
    :param prob_command: probability of a command signal
    :param prob_in_change: probability of input change
    :param time_period: environment update period
    :param stochastic: whether outputs are stochastic
    :param reward: reward value
    :param constraints: placement constraints
    :param label: vertex label
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param simulation_duration_ms: run length used to size recording
    :param rand_seed: seed forwarded to the machine vertex
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label

    # Pass in variables
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._reward = reward
    self._pop_size = pop_size
    self._prob_command = prob_command
    self._prob_in_change = prob_in_change
    self._n_neurons = pop_size * 4
    self._rand_seed = rand_seed
    self._time_period = time_period

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.RECALL_REGION_BYTES + self.DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Recall, self).__init__(
        RecallMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            rate_on, rate_off, pop_size, prob_command, prob_in_change,
            time_period, stochastic, reward, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, truth_table, input_sequence,
             rate_on=default_parameters['rate_on'],
             rate_off=default_parameters['rate_off'],
             score_delay=default_parameters['score_delay'],
             stochastic=default_parameters['stochastic'],
             constraints=default_parameters['constraints'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration'],
             rand_seed=default_parameters['random_seed']):
    """Build the Logic application vertex and its machine vertex.

    :param truth_table: output for each input combination; its length
        must be 2 ** len(input_sequence)
    :param input_sequence: sequence of inputs; one neuron per input
    :param rate_on: firing rate for the "on" state
    :param rate_off: firing rate for the "off" state
    :param score_delay: delay before scoring
    :param stochastic: whether outputs are stochastic
    :param constraints: placement constraints
    :param label: vertex label
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param simulation_duration_ms: run length used to size recording
    :param rand_seed: seed forwarded to the machine vertex
    :raises Bad_Table: if the truth table size does not match the
        number of inputs
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label

    # Pass in variables
    self._truth_table = truth_table
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._input_sequence = input_sequence
    self._no_inputs = len(input_sequence)

    # FIX: the original raised Bad_Table and immediately caught it,
    # printing the error and continuing to build an inconsistent
    # vertex.  Propagate the error to the caller instead.
    if self._no_inputs != numpy.log2(len(self._truth_table)):
        raise Bad_Table('table and input sequence are not compatible')

    self._n_neurons = self._no_inputs
    self._rand_seed = rand_seed
    self._score_delay = score_delay

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # (static) resources required
    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.LOGIC_REGION_BYTES + self.BASE_DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Logic, self).__init__(
        LogicMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            truth_table, input_sequence, rate_on, rate_off, score_delay,
            stochastic, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, constraints=default_parameters['constraints'],
             encoding=default_parameters['encoding'],
             time_increment=default_parameters['time_increment'],
             pole_length=default_parameters['pole_length'],
             pole_angle=default_parameters['pole_angle'],
             pole2_length=default_parameters['pole2_length'],
             pole2_angle=default_parameters['pole2_angle'],
             reward_based=default_parameters['reward_based'],
             force_increments=default_parameters['force_increments'],
             max_firing_rate=default_parameters['max_firing_rate'],
             number_of_bins=default_parameters['number_of_bins'],
             central=default_parameters['central'],
             rand_seed=default_parameters['rand_seed'],
             bin_overlap=default_parameters['bin_overlap'],
             tau_force=default_parameters['tau_force'],
             label=default_parameters['label'],
             incoming_spike_buffer_size=default_parameters[
                 'incoming_spike_buffer_size'],
             simulation_duration_ms=default_parameters['duration']):
    """Build the DoublePendulum application vertex and machine vertex.

    :param constraints: placement constraints
    :param encoding: state-encoding scheme forwarded to the machine
        vertex -- semantics not visible here
    :param time_increment: simulation time step of the environment
    :param pole_length: length of the first pole
    :param pole_angle: initial angle of the first pole
    :param pole2_length: length of the second pole
    :param pole2_angle: initial angle of the second pole
    :param reward_based: whether scoring is reward based
    :param force_increments: number of discrete force levels
    :param max_firing_rate: maximum output firing rate
    :param number_of_bins: bins per metric; 6 metrics are encoded
    :param central: central-bin flag forwarded to the machine vertex
    :param rand_seed: seed forwarded to the machine vertex
    :param bin_overlap: overlap between adjacent bins
    :param tau_force: force decay time constant
    :param label: vertex label
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param simulation_duration_ms: run length used to size recording
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label
    self._encoding = encoding

    # Pass in variables
    self._pole_length = pole_length
    self._pole_angle = pole_angle
    self._pole2_length = pole2_length
    self._pole2_angle = pole2_angle
    self._force_increments = force_increments

    # for rate based it's only 1 neuron per metric
    # (position, angle, velocity of both)
    self._n_neurons = 6 * number_of_bins

    self._time_increment = time_increment
    self._reward_based = reward_based
    self._max_firing_rate = max_firing_rate
    self._number_of_bins = number_of_bins
    self._central = central
    self._rand_seed = rand_seed
    self._bin_overlap = bin_overlap
    self._tau_force = tau_force

    # used to define size of recording region
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.PENDULUM_REGION_BYTES + self.BASE_DATA_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(DoublePendulum, self).__init__(
        DoublePendulumMachineVertex(
            vertex_slice, resources_required, constraints, label, self,
            encoding, time_increment, pole_length, pole_angle,
            pole2_length, pole2_angle, reward_based, force_increments,
            max_firing_rate, number_of_bins, central, bin_overlap,
            tau_force, incoming_spike_buffer_size,
            simulation_duration_ms, rand_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, n_neurons,
             spikes_per_second=AbstractPopulationVertex.
             none_pynn_default_parameters['spikes_per_second'],
             ring_buffer_sigma=AbstractPopulationVertex.
             none_pynn_default_parameters['ring_buffer_sigma'],
             incoming_spike_buffer_size=None, constraints=None,
             label="Convolution core", src_width=WIDTH_PIXELS,
             src_height=HEIGHT_PIXELS,
             src_polarity_bits=POLARITY_BITS, polarity=POLARITY,
             sample_step_width=SAMPLE_STEP_WIDTH,
             sample_step_height=SAMPLE_STEP_HEIGHT, kernel=KERNEL,
             threshold=THRESHOLD,
             use_xyp_or_pyx=KEY_FORMATS.USE_XYP.value,
             time_window=TIME_WINDOW):
    """Build a convolution vertex over an (x, y, polarity)-keyed input.

    The numbered comments (#1..#17) count the scalar parameters that
    contribute to the memory-size computation below.

    :param n_neurons: ignored (see NOTE below)
    :param spikes_per_second: accepted for API compatibility
    :param ring_buffer_sigma: accepted for API compatibility
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param constraints: placement constraints
    :param label: vertex label
    :param src_width: source frame width in pixels
    :param src_height: source frame height in pixels
    :param src_polarity_bits: key bits used for event polarity
    :param polarity: polarity handled by this core
    :param sample_step_width: horizontal subsampling step (power of 2)
    :param sample_step_height: vertical subsampling step (power of 2)
    :param kernel: 2-D convolution kernel (numpy array)
    :param threshold: output threshold (stored as float16)
    :param use_xyp_or_pyx: key format selector
    :param time_window: integration time window
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._width = numpy.uint32(src_width)  # 1
    self._height = numpy.uint32(src_height)  # 2
    self._polarity_bits = numpy.uint32(src_polarity_bits)  # 3
    self._width_bits = numpy.uint32(
        numpy.ceil(numpy.log2(src_width)))  # 4
    self._height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(src_height)))  # 5

    self._kernel_width = numpy.uint32(kernel.shape[1])  # 6
    self._kernel_height = numpy.uint32(kernel.shape[0])  # 7
    # steps are stored as log2 so the C code can shift instead of divide
    self._step_width = numpy.uint32(
        numpy.round(numpy.log2(sample_step_width)))  # 8
    self._step_height = numpy.uint32(
        numpy.round(numpy.log2(sample_step_height)))  # 9
    self._threshold = numpy.float16(threshold)  # 10
    self._use_xyp_or_pyx = numpy.uint32(use_xyp_or_pyx)  # 11
    self._time_window = numpy.uint32(time_window)  # 12

    # first valid output coordinate (half-kernel border)
    self._start_width = kernel.shape[1] // 2
    self._start_height = kernel.shape[0] // 2
    self._out_width = subsamp_size(
        self._start_width, self._width, 1 << self._step_width)  # 13
    self._out_height = subsamp_size(
        self._start_height, self._height, 1 << self._step_height)  # 14

    self._out_width_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_width)))  # 15
    self._out_height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._out_height)))  # 16

    self._polarity = polarity  # 17

    self._n_neurons = (1 << (self._out_width_bits +
                             self._out_height_bits +
                             self._polarity_bits))

    self._kernel = numpy.float16(kernel)  # M

    # params, kernel, key
    self._memory_size_in_bytes = (17 + kernel.size + 1) * 4

    # Superclasses
    ApplicationVertex.__init__(self, label, constraints, self.n_atoms)
    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # get config from simulator
    config = globals_variables.get_simulator().config
    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = config.getint(
            "Simulation", "incoming_spike_buffer_size")
def __init__(self, x_factor=X_FACTOR, y_factor=Y_FACTOR,
             width=WIDTH_PIXELS, height=HEIGHT_PIXELS,
             colour_bits=COLOUR_BITS, constraints=None,
             label="Breakout", incoming_spike_buffer_size=None,
             simulation_duration_ms=MAX_SIM_DURATION, bricking=1,
             random_seed=rand_seed):
    """Build the Breakout application vertex and its machine vertex.

    :param x_factor: horizontal downscaling factor of the frame
    :param y_factor: vertical downscaling factor of the frame
    :param width: source frame width in pixels (before downscaling)
    :param height: source frame height in pixels (before downscaling)
    :param colour_bits: number of key bits encoding colour
    :param constraints: placement constraints
    :param label: vertex label
    :param incoming_spike_buffer_size: buffer size in bytes; when None
        the simulator configuration value is used
    :param simulation_duration_ms: run length used to size recording
    :param bricking: whether bricks are enabled (1) or not (0)
    :param random_seed: seed forwarded to the machine vertex
    """
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label
    self._x_factor = x_factor
    self._y_factor = y_factor
    # NOTE(review): true division - downscaled dimensions are floats
    # when the factor does not divide evenly; confirm // was not meant
    self._width = width / x_factor
    self._height = height / y_factor
    self._colour_bits = colour_bits
    self._width_bits = numpy.uint32(numpy.ceil(numpy.log2(self._width)))
    self._height_bits = numpy.uint32(
        numpy.ceil(numpy.log2(self._height)))

    self._n_neurons = int(1 << (self._width_bits + self._height_bits +
                                self._colour_bits))
    self._bricking = bricking
    self._rand_seed = random_seed

    # Define size of recording region
    # NOTE(review): sibling constructors divide by 1000. here; the
    # 10000. divisor may be intentional but looks inconsistent - confirm
    self._recording_size = int((simulation_duration_ms / 10000.) * 4)

    # (static) resources required
    # technically as using OneAppOneMachine this is not necessary?
    resources_required = (
        self.BREAKOUT_REGION_BYTES + self.PARAM_REGION_BYTES +
        self._recording_size)

    vertex_slice = Slice(0, self._n_neurons - 1)

    # Superclasses
    super(Breakout, self).__init__(
        BreakoutMachineVertex(
            vertex_slice, resources_required, constraints, self._label,
            self, x_factor, y_factor, width, height, colour_bits,
            incoming_spike_buffer_size, simulation_duration_ms,
            bricking, random_seed),
        label=label, constraints=constraints)

    AbstractProvidesOutgoingPartitionConstraints.__init__(self)
    SimplePopulationSettable.__init__(self)
    AbstractChangableAfterRun.__init__(self)
    AbstractAcceptsIncomingSynapses.__init__(self)
    self._change_requires_mapping = True

    # FIX: always set the attribute; previously it was only assigned
    # when the argument was None, so passing an explicit size left
    # self._incoming_spike_buffer_size undefined (AttributeError later).
    self._incoming_spike_buffer_size = incoming_spike_buffer_size
    if incoming_spike_buffer_size is None:
        self._incoming_spike_buffer_size = get_config_int(
            "Simulation", "incoming_spike_buffer_size")