def test_kwta_presentation():
    """Tests one kwta presentation to half of the units, followed by
    another presentation to the second half."""
    half_on = [[1] * 8] * 4
    half_off = [[0] * 8] * 4
    # Each case: (input pattern, slice expected quieter, slice expected louder)
    cases = [
        (half_on + half_off, slice(4, 8), slice(0, 4)),
        (half_off + half_on, slice(0, 4), slice(4, 8)),
    ]
    for i, (pattern, quiet, loud) in enumerate(cases):
        sample = InputSample(8, 8, pattern)
        kwta_presentation(Tns.p2, Tns.p1, sample, 2)
        # Each presentation advances the simulation by 2 time units.
        assert get_current_time() == 2 * (i + 1)
        rates = get_rate_encoder(Tns.p1).get_rates()
        assert_array_less(rates[quiet], rates[loud])
def kwta_epoch(trained_population, input_population, projection,
               input_samples, num_winners, neighbourhood_fn,
               presentation_duration, learning_rule, learning_rate,
               max_weight_value, trained_pop_max_rate=None,
               input_pop_max_rate=None):
    """Runs one k-winners-take-all learning epoch.

    Presents each sample in input_samples to input_population for
    presentation_duration, selects the num_winners most active units of
    trained_population, and applies learning_rule to the incoming weight
    vector of each winner and of its neighbours as given by
    neighbourhood_fn (a callable (population, unit) -> [(unit, factor)];
    if None, only the winner itself is adapted at full learning rate).

    Returns the maximum absolute synaptic weight change observed during
    the epoch (usable by callers as a convergence measure).

    Raises SimulationError if a max rate is neither passed in nor
    available as population.max_unit_rate.
    """
    # NOTE(fix): identity comparison to None per PEP 8 (was `== None`).
    if trained_pop_max_rate is None:
        try:
            trained_pop_max_rate = trained_population.max_unit_rate
        except AttributeError:
            raise SimulationError("Could not find the trained population's max "
                                  "expected spiking rate per unit. It should be "
                                  "set as population.max_unit_rate.")
    if input_pop_max_rate is None:
        try:
            input_pop_max_rate = input_population.max_unit_rate
        except AttributeError:
            raise SimulationError("Could not find the input population's max "
                                  "expected spiking rate per unit. It should be "
                                  "set as population.max_unit_rate.")
    rate_enc = get_rate_encoder(trained_population)
    if neighbourhood_fn is None:
        # Default neighbourhood: the winner alone, at full learning rate.
        neighbourhood_fn = lambda _, u: [(u, 1)]
    max_deltaw = 0
    for s in input_samples:
        weights = get_weights(projection, max_weight=max_weight_value)
        kwta_presentation(trained_population, input_population, s,
                          presentation_duration)
        argwinners = select_kwta_winners(trained_population, num_winners,
                                         presentation_duration)
        for argwin in argwinners:
            main_unit = rate_enc[argwin[0]][argwin[1]][1]
            # Adapt the weights for winner w and any activated neighbour
            for unit, factor in neighbourhood_fn(trained_population, main_unit):
                unit_index = trained_population.id_to_index(unit)
                # weight vector to the unit
                wv = weights.get_normalized_weights_vector(unit_index)
                # input to the unit (normalized,
                # TODO: sigmoidal contrast-enhancement?)
                pre_syn_out = presynaptic_outputs(unit, projection,
                                                  t=presentation_duration)
                pre_syn_out /= input_pop_max_rate
                # output of the unit (normalized,
                # TODO: sigmoidal contrast-enhancement?)
                post_syn_act = rate_enc.get_rate_for_unit_index(
                    unit_index, t=presentation_duration)
                post_syn_act /= trained_pop_max_rate
                # calculate and apply the new weight vector
                new_wv = learning_rule(pre_syn_out, post_syn_act, wv,
                                       learning_rate * factor)
                weights.set_normalized_weights_vector(unit_index, new_wv)
                # calculate the max synaptic weight delta
                max_deltaw = max(max_deltaw, max(numpy.abs(new_wv - wv)))
        set_weights(projection, weights)
    return max_deltaw
def schedule_output_rate_calculation(population, start_t=None, duration=None):
    """Schedules the recurrent calculation of the output rate of the
    given population.

    A new RectilinearOutputRateEncoder is created with default parameters
    if none is registered for this population. If no start_t is given, the
    current simulation time is used as start time. If no duration is
    given, the output rate encoder is active during the whole simulation.
    """
    rore = get_rate_encoder(population)
    # NOTE(fix): identity comparison to None per PEP 8 (was `== None`/`!= None`).
    if start_t is None:
        start_t = get_current_time()
    # end_t stays None (active for the whole simulation) unless a finite
    # duration was requested.
    end_t = None
    if duration is not None:
        end_t = duration + start_t
    _schedule_output_rate_encoder(rore, start_t, end_t)
def select_kwta_winners(population, k, presentation_duration):
    """Returns the list of (row, column) coordinates of the k most active
    units in the population for the presentation duration up to the
    current simulator time. Ties are broken using uniform random
    selection."""
    argwinners = []
    if k > 0:
        rate_enc = get_rate_encoder(population)
        # Pair each unit's rate with its flat index: [(rate, flat_index), ...]
        rates = list(itertools.izip(
            splice(rate_enc.get_rates(t=presentation_duration)),
            infinite_xrange()))
        # we need to shuffle to randomize ties resolution
        numpy.random.shuffle(rates)
        # Min-heap of the current k best; strict > keeps ties resolved by
        # the shuffled order.
        winners = rates[0:k]
        heapq.heapify(winners)
        for r in rates[k:]:
            if r[0] > winners[0][0]:
                heapq.heapreplace(winners, r)
        # NOTE(fix): floor division (//) so the row index stays an int even
        # under `from __future__ import division` or Python 3 (was `/`).
        argwinners = [(w[1] // rate_enc.shape[0], w[1] % rate_enc.shape[0])
                      for w in winners]
    return argwinners
def select_kwta_winners(population, k, presentation_duration):
    """Returns the list of (row, column) coordinates of the k most active
    units in the population for the presentation duration up to the
    current simulator time. Ties are broken using uniform random
    selection."""
    argwinners = []
    if k > 0:
        rate_enc = get_rate_encoder(population)
        # Pair each unit's rate with its flat index: [(rate, flat_index), ...]
        rates = list(
            itertools.izip(splice(rate_enc.get_rates(t=presentation_duration)),
                           infinite_xrange()))
        # we need to shuffle to randomize ties resolution
        numpy.random.shuffle(rates)
        # Min-heap of the current k best; strict > keeps ties resolved by
        # the shuffled order.
        winners = rates[0:k]
        heapq.heapify(winners)
        for r in rates[k:]:
            if r[0] > winners[0][0]:
                heapq.heapreplace(winners, r)
        # NOTE(fix): divmod gives (row, column) with integer semantics even
        # under `from __future__ import division` or Python 3 (was `/` + `%`).
        argwinners = [divmod(w[1], rate_enc.shape[0]) for w in winners]
    return argwinners
def kwta_epoch(trained_population, input_population, projection,
               input_samples, num_winners, neighbourhood_fn,
               presentation_duration, learning_rule, learning_rate,
               max_weight_value, trained_pop_max_rate=None,
               input_pop_max_rate=None):
    """Runs one k-winners-take-all learning epoch.

    Presents each sample in input_samples to input_population for
    presentation_duration, selects the num_winners most active units of
    trained_population, and applies learning_rule to the incoming weight
    vector of each winner and of its neighbours as given by
    neighbourhood_fn (a callable (population, unit) -> [(unit, factor)];
    if None, only the winner itself is adapted at full learning rate).

    Returns the maximum absolute synaptic weight change observed during
    the epoch (usable by callers as a convergence measure).

    Raises SimulationError if a max rate is neither passed in nor
    available as population.max_unit_rate.
    """
    # NOTE(fix): identity comparison to None per PEP 8 (was `== None`).
    if trained_pop_max_rate is None:
        try:
            trained_pop_max_rate = trained_population.max_unit_rate
        except AttributeError:
            raise SimulationError(
                "Could not find the trained population's max "
                "expected spiking rate per unit. It should be "
                "set as population.max_unit_rate.")
    if input_pop_max_rate is None:
        try:
            input_pop_max_rate = input_population.max_unit_rate
        except AttributeError:
            raise SimulationError(
                "Could not find the input population's max "
                "expected spiking rate per unit. It should be "
                "set as population.max_unit_rate.")
    rate_enc = get_rate_encoder(trained_population)
    if neighbourhood_fn is None:
        # Default neighbourhood: the winner alone, at full learning rate.
        neighbourhood_fn = lambda _, u: [(u, 1)]
    max_deltaw = 0
    for s in input_samples:
        weights = get_weights(projection, max_weight=max_weight_value)
        kwta_presentation(trained_population, input_population, s,
                          presentation_duration)
        argwinners = select_kwta_winners(trained_population, num_winners,
                                         presentation_duration)
        for argwin in argwinners:
            main_unit = rate_enc[argwin[0]][argwin[1]][1]
            # Adapt the weights for winner w and any activated neighbour
            for unit, factor in neighbourhood_fn(trained_population, main_unit):
                unit_index = trained_population.id_to_index(unit)
                # weight vector to the unit
                wv = weights.get_normalized_weights_vector(unit_index)
                # input to the unit (normalized,
                # TODO: sigmoidal contrast-enhancement?)
                pre_syn_out = presynaptic_outputs(unit, projection,
                                                  t=presentation_duration)
                pre_syn_out /= input_pop_max_rate
                # output of the unit (normalized,
                # TODO: sigmoidal contrast-enhancement?)
                post_syn_act = rate_enc.get_rate_for_unit_index(
                    unit_index, t=presentation_duration)
                post_syn_act /= trained_pop_max_rate
                # calculate and apply the new weight vector
                new_wv = learning_rule(pre_syn_out, post_syn_act, wv,
                                       learning_rate * factor)
                weights.set_normalized_weights_vector(unit_index, new_wv)
                # calculate the max synaptic weight delta
                max_deltaw = max(max_deltaw, max(numpy.abs(new_wv - wv)))
        set_weights(projection, weights)
    return max_deltaw