def test_probeable():
    """Each learning rule advertises the same probeable attributes on the
    rule type and on the built connection's learning rule."""
    net = nengo.Network()

    def _assert_probeable(rule, expected):
        # The rule type itself advertises its probeable attributes...
        assert rule.probeable == expected
        # ...and so does the learning rule of a connection built with it.
        # Voja needs a decoded connection into an Ensemble; the others are
        # checked on a neuron-to-neuron connection.
        is_voja = isinstance(rule, Voja)
        conn = nengo.Connection(
            net.n,
            net.e if is_voja else net.n,
            transform=np.ones((1, 10)) if is_voja else 1.0,
            learning_rule_type=rule,
        )
        assert conn.learning_rule.probeable == expected

    with net:
        net.e = nengo.Ensemble(10, 1)
        net.n = net.e.neurons

        cases = [
            (nengo.PES(), ("error", "activities", "delta")),
            (nengo.RLS(), ("pre_filtered", "error", "delta", "inv_gamma")),
            (nengo.BCM(), ("theta", "pre_filtered", "post_filtered", "delta")),
            (nengo.Oja(), ("pre_filtered", "post_filtered", "delta")),
            (nengo.Voja(), ("post_filtered", "scaled_encoders", "delta")),
        ]
        for rule, expected in cases:
            _assert_probeable(rule, expected)
def test_encoder_learning_undecoded_error():
    """Voja on a full-weight-solver connection is rejected at validation time."""
    with nengo.Network():
        pre = nengo.Ensemble(2, 2)
        post = nengo.Ensemble(2, 2)
        # A weight solver bypasses the post encoders, so encoder learning
        # (Voja) cannot apply.
        with pytest.raises(ValidationError, match="encoders are not used"):
            nengo.Connection(
                pre,
                post,
                solver=nengo.solvers.LstsqL2(weights=True),
                learning_rule_type=nengo.Voja(),
            )
def test_encoder_learning_post_errors():
    """Voja is only valid on decoded connections terminating at an Ensemble."""
    with nengo.Network():
        ens = nengo.Ensemble(2, 2)

        # A Node has no encoders, so it cannot be the post of a Voja rule.
        with pytest.raises(ValidationError, match="'post' must be of type 'Ensemble'"):
            nengo.Connection(
                ens,
                nengo.Node(size_in=2),
                learning_rule_type=nengo.Voja(),
            )

        # A weight solver bypasses the post encoders entirely.
        with pytest.raises(ValidationError, match="encoders are not used"):
            nengo.Connection(
                ens,
                ens,
                solver=nengo.solvers.LstsqL2(weights=True),
                learning_rule_type=nengo.Voja(),
            )
def test_encoder_learning_undecoded_error(Simulator):
    """Voja on a full-weight-solver connection fails when the simulator builds."""
    with nengo.Network() as net:
        pre = nengo.Ensemble(2, 2)
        post = nengo.Ensemble(2, 2)
        nengo.Connection(
            pre,
            post,
            solver=nengo.solvers.LstsqL2(weights=True),
            learning_rule_type=nengo.Voja(),
        )

    # The error is raised by the build step, not at connection creation.
    with pytest.raises(ValueError, match="connection must be decoded.*encoder learn"):
        with Simulator(net):
            pass
def test_encoder_learnt_sink(self):
    """A connection whose rule is Voja sinks at the post ensemble's
    `learnt` input port rather than the standard input."""
    # Network with an encoder-learning connection between two ensembles.
    with nengo.Network():
        pre = nengo.Ensemble(100, 2)
        post = nengo.Ensemble(100, 2)
        conn = nengo.Connection(pre, post)
        conn.learning_rule_type = nengo.Voja()

    # Model containing only the operator for the post ensemble.
    model = builder.Model()
    post_op = operators.EnsembleLIF(post)
    model.object_operators[post] = post_op

    sink = ensemble.get_ensemble_sink(model, conn)
    assert sink.target.obj is post_op
    assert sink.target.port is ensemble.EnsembleInputPort.learnt
def test_probe_voja_scaled_encoders(self):
    """Probing a Voja rule's `scaled_encoders` becomes a local probe on the
    post ensemble's operator and adds no extra operators."""
    with nengo.Network() as net:
        pre = nengo.Ensemble(100, 2)
        post = nengo.Ensemble(100, 2)
        conn = nengo.Connection(pre, post)
        conn.learning_rule_type = nengo.Voja()
        probe = nengo.Probe(conn.learning_rule, "scaled_encoders")

    # Build the whole network into an empty model.
    model = builder.Model()
    model.build(net)

    # The probe is attached locally to the post ensemble; only the two
    # ensemble operators exist.
    assert model.object_operators[post].local_probes == [probe]
    assert len(model.object_operators) == 2
def test_mark_signals():
    """mark_signals flags encoders/biases/weights as trainable except where a
    learning rule already modifies them at runtime."""
    with nengo.Network() as net:
        ens0 = nengo.Ensemble(10, 1, neuron_type=nengo.LIF())
        ens1 = nengo.Ensemble(20, 1, neuron_type=nengo.Direct())
        ens2 = nengo.Ensemble(30, 1)
        conn0 = nengo.Connection(ens0, ens1)
        conn1 = nengo.Connection(ens0, ens1, learning_rule_type=nengo.PES())
        conn2 = nengo.Connection(ens0, ens2, learning_rule_type=nengo.Voja())
        nengo.Probe(ens2)

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(
        model, None, None, 1, None, utils.NullProgressBar(), None
    )
    tg.mark_signals()

    # Encoders are trainable unless Voja modifies them (conn2 targets ens2).
    assert model.sig[ens0]["encoders"].trainable
    assert model.sig[ens1]["encoders"].trainable
    assert not model.sig[ens2]["encoders"].trainable

    # Biases stay trainable.
    assert model.sig[ens0.neurons]["bias"].trainable
    assert model.sig[ens2.neurons]["bias"].trainable

    # PES (conn1) modifies its weights, so they are not trainable; Voja
    # (conn2) modifies encoders, not the connection weights.
    assert model.sig[conn0]["weights"].trainable
    assert not model.sig[conn1]["weights"].trainable
    assert model.sig[conn2]["weights"].trainable

    expected_trainable = (
        model.sig[ens0]["encoders"],
        model.sig[ens1]["encoders"],
        model.sig[ens0.neurons]["bias"],
        model.sig[ens2.neurons]["bias"],
        model.sig[conn0]["weights"],
        model.sig[conn2]["weights"],
    )

    # Every signal touched by any operator agrees with the expected set.
    for op in model.operators:
        for sig in op.all_signals:
            assert sig.trainable == (sig in expected_trainable)
def test_encoder_learning_rule_sink(self):
    """A connection into a Voja learning rule sinks at the post ensemble's
    operator, with the learning rule itself as the port."""
    # Network with an encoder-learning connection and an error input into
    # its learning rule.
    with nengo.Network():
        pre = nengo.Ensemble(100, 2)
        post = nengo.Ensemble(100, 2)
        err = nengo.Ensemble(100, 1)
        conn = nengo.Connection(pre, post)
        conn.learning_rule_type = nengo.Voja()
        err_conn = nengo.Connection(err, conn.learning_rule)

    # Model containing only the operator for the post ensemble.
    model = builder.Model()
    post_op = operators.EnsembleLIF(post)
    model.object_operators[post] = post_op

    # The sink targets the post operator on the learning-rule port.
    sink = ensemble.get_learning_rule_sink(model, err_conn)
    assert sink.target.obj is post_op
    assert sink.target.port is conn.learning_rule
def test_mark_signals_config():
    """Test the `trainable` config setting's interaction with mark_signals.

    Covers: class-level defaults, subnetwork inheritance and overrides,
    per-instance and per-`ens.neurons` overrides, manually forcing a learned
    connection/ensemble back to trainable (which warns), and a model with no
    top-level network (which also warns).
    """
    # NOTE(review): nesting depths below are reconstructed from a
    # whitespace-mangled source; the asserted outcomes do not depend on the
    # exact depth at which ens2 is created, since instance config overrides
    # class config at any level.
    with nengo.Network() as net:
        config.configure_settings(trainable=None)
        # class-level default: Ensembles are not trainable
        net.config[nengo.Ensemble].trainable = False

        with nengo.Network():
            # check that object in subnetwork inherits config from parent
            ens0 = nengo.Ensemble(10, 1, label="ens0")

            # check that ens.neurons can be set independent of ens
            net.config[ens0.neurons].trainable = True

            with nengo.Network():
                with nengo.Network():
                    # check that subnetworks can override parent configs
                    config.configure_settings(trainable=True)
                    ens1 = nengo.Ensemble(10, 1, label="ens1")

                    with nengo.Network():
                        # check that subnetworks inherit the trainable settings
                        # from parent networks
                        ens3 = nengo.Ensemble(10, 1, label="ens3")

            # check that instances can be set independent of class
            ens2 = nengo.Ensemble(10, 1, label="ens2")
            net.config[ens2].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    progress = utils.NullProgressBar()
    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    tg.mark_signals()

    assert not model.sig[ens0]["encoders"].trainable
    assert model.sig[ens0.neurons]["bias"].trainable
    assert model.sig[ens1]["encoders"].trainable
    assert model.sig[ens2]["encoders"].trainable
    assert model.sig[ens3]["encoders"].trainable

    # check that learning rule connections can be manually set to True
    with nengo.Network() as net:
        config.configure_settings(trainable=None)
        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(10, 1)
        conn0 = nengo.Connection(a, b, learning_rule_type=nengo.PES())
        net.config[conn0].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    # forcing a learned connection to be trainable triggers a warning
    with pytest.warns(UserWarning):
        tg.mark_signals()
    assert model.sig[conn0]["weights"].trainable

    # forcing an ensemble whose encoders are modified by Voja to be
    # trainable also warns
    with nengo.Network() as net:
        config.configure_settings(trainable=None)
        a = nengo.Node([0])
        ens = nengo.Ensemble(10, 1)
        nengo.Connection(a, ens, learning_rule_type=nengo.Voja())
        net.config[nengo.Ensemble].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()
    assert model.sig[ens]["encoders"].trainable

    # check that models with no toplevel work
    sig = nengo.builder.signal.Signal([0])
    op = nengo.builder.operator.Reset(sig, 1)
    model = nengo.builder.Model()
    model.add_op(op)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()
    assert not sig.trainable
def __init__(
    self,
    n_neurons,
    dimensions,
    intercepts=nengo.dists.Uniform(0, 0),
    voja_tau=0.005,
    voja_rate=1e-3,
    pes_rate=1e-3,
    label=None,
    inhibit_synapse=0.01,
    inhibit_strength=0.0005,
    seed=None,
    load_from=None,
    inhibit_all=False,
):
    """Build a learning associative memory network.

    The memory ensemble learns its encoders with Voja (rate ``voja_rate``)
    on the input connection and its decoders with PES (rate ``pes_rate``)
    on the output connection; setting either rate to 0 disables that rule.

    Parameters
    ----------
    n_neurons : int
        Number of neurons in the memory ensemble.
    dimensions : int
        Dimensionality of the input/output/correct signals.
    intercepts : nengo distribution
        Intercept distribution for the memory ensemble.
    voja_tau, voja_rate : float
        Voja post-synaptic tau and learning rate (rate 0 disables Voja).
    pes_rate : float
        PES learning rate (0 disables PES and the learn-control machinery).
    inhibit_synapse, inhibit_strength : float
        Synapse and gain of the recurrent inhibition.
    seed : int, optional
        Ensemble seed; must match the stored seed when ``load_from`` is given.
    load_from : str, optional
        Path to an ``.npz`` file with 'enc', 'dec' and 'seed' arrays to
        restore previously-learned encoders/decoders.
    inhibit_all : bool
        If True, pool all activity into one inhibitory node (self-inhibition
        included); otherwise use mutual inhibition that spares each
        neuron's own self-connection.
    """
    super(LearningAssocMem, self).__init__(label=label)

    if load_from is not None:
        # Restore learned state; the seed must agree so that the ensemble's
        # other parameters (gains, etc.) are regenerated identically.
        data = np.load(load_from)
        encoders = data['enc']
        decoders = data['dec']
        if seed is None:
            seed = int(data['seed'])
        else:
            assert seed == int(data['seed'])
    else:
        encoders = nengo.Default
        # Start from a null decode; PES learns the mapping online.
        decoders = np.zeros((dimensions, n_neurons), dtype=float)
    self.seed = seed

    with self:
        self.mem = nengo.Ensemble(
            n_neurons=n_neurons,
            dimensions=dimensions,
            intercepts=intercepts,
            #max_rates=nengo.dists.Uniform(150,150),
            encoders=encoders,
            seed=seed,
        )
        self.input = nengo.Node(None, size_in=dimensions)
        self.output = nengo.Node(None, size_in=dimensions)
        self.correct = nengo.Node(None, size_in=dimensions)

        # Encoder learning on the input connection (disabled when rate <= 0).
        if voja_rate > 0:
            learning_rule_type = nengo.Voja(post_tau=voja_tau, learning_rate=voja_rate)
        else:
            learning_rule_type = None
        self.conn_in = nengo.Connection(
            self.input,
            self.mem,
            learning_rule_type=learning_rule_type,
        )

        # Decoder learning on the output connection (disabled when rate <= 0).
        if pes_rate > 0:
            learning_rule_type = nengo.PES(learning_rate=pes_rate)
        else:
            learning_rule_type = None
        self.conn_out = nengo.Connection(
            self.mem.neurons,
            self.output,
            transform=decoders,
            learning_rule_type=learning_rule_type,
        )

        if pes_rate > 0:
            # Error = output - correct, gated off when the last input
            # dimension (stop_pes) rises to >= 0.5.
            self.learn_control = nengo.Node(
                lambda t, x: x[:-1] if x[-1] < 0.5 else x[:-1] * 0,
                size_in=dimensions + 1,
            )
            nengo.Connection(
                self.learn_control,
                self.conn_out.learning_rule,
            )
            nengo.Connection(self.output, self.learn_control[:-1], synapse=None)
            nengo.Connection(self.correct, self.learn_control[:-1],
                             transform=-1, synapse=None)

            self.stop_pes = nengo.Node(None, size_in=1)
            nengo.Connection(self.stop_pes, self.learn_control[-1], synapse=None)

        if inhibit_all:
            # Pooled inhibition: total activity drives a single node that
            # inhibits every memory neuron (including the active ones).
            inhibit = nengo.Node(None, size_in=1)
            nengo.Connection(self.mem.neurons, inhibit,
                             transform=inhibit_strength * np.ones((1, n_neurons)),
                             synapse=None)
            nengo.Connection(inhibit, self.mem.neurons,
                             transform=-np.ones((self.mem.n_neurons, 1)),
                             synapse=inhibit_synapse)
        else:
            # Mutual inhibition: eye(n) - 1 zeroes the diagonal so each
            # neuron does not inhibit itself.
            nengo.Connection(self.mem.neurons, self.mem.neurons,
                             transform=inhibit_strength * (np.eye(self.mem.n_neurons) - 1),
                             synapse=inhibit_synapse)
def create_model():
    """Build a learned key->value associative memory network.

    Keys drive a memory ensemble whose encoders are learned with Voja;
    decoders to the recall output are learned with PES against the value
    signal. Learning is switched off halfway through the run (T / 2).

    Returns
    -------
    tuple
        ``(model, [], {}, probes)`` where ``probes`` lists the key/value/
        learning/error/recall probes (plus the scaled-encoder probe when
        ``record_encoders`` is set).
    """
    num_items = 5

    d_key = 2
    d_value = 4

    record_encoders = True

    rng = np.random.RandomState(seed=7)
    keys = nengo.dists.UniformHypersphere(surface=True).sample(
        num_items, d_key, rng=rng)
    values = nengo.dists.UniformHypersphere(surface=False).sample(
        num_items, d_value, rng=rng)

    # Largest pairwise key similarity: intercepts above this make each
    # neuron respond to at most one key.
    intercept = (np.dot(keys, keys.T) - np.eye(num_items)).flatten().max()

    def cycle_array(x, period, dt=0.001):
        """Cycles through the elements"""
        i_every = int(round(period / dt))
        if i_every != period / dt:
            raise ValueError("dt (%s) does not divide period (%s)" % (dt, period))

        def f(t):
            i = int(round((t - dt) / dt))  # t starts at dt
            # BUG FIX: use floor division; `/` yields a float under Python 3,
            # and a float index raises TypeError when indexing the array.
            return x[(i // i_every) % len(x)]

        return f

    # Model constants
    n_neurons = 200
    dt = 0.001
    period = 0.3
    T = period * num_items * 2

    # Model network
    model = nengo.Network()
    with model:

        # Create the inputs/outputs
        stim_keys = nengo.Node(output=cycle_array(keys, period, dt),
                               label="stim_keys")
        stim_values = nengo.Node(output=cycle_array(values, period, dt),
                                 label="stim_values")
        # -1 inhibits learning once t >= T / 2
        learning = nengo.Node(output=lambda t: -int(t >= T / 2),
                              label="learning")
        recall = nengo.Node(size_in=d_value, label="recall")

        # Create the memory
        memory = nengo.Ensemble(n_neurons, d_key,
                                intercepts=[intercept] * n_neurons,
                                label="memory")

        # Learn the encoders/keys
        voja = nengo.Voja(post_tau=None, learning_rate=5e-2)
        conn_in = nengo.Connection(stim_keys, memory, synapse=None,
                                   learning_rule_type=voja)
        nengo.Connection(learning, conn_in.learning_rule, synapse=None)

        # Learn the decoders/values, initialized to a null function
        conn_out = nengo.Connection(memory, recall,
                                    learning_rule_type=nengo.PES(1e-3),
                                    function=lambda x: np.zeros(d_value))

        # Create the error population; the learning signal strongly inhibits
        # it once learning is switched off.
        error = nengo.Ensemble(n_neurons, d_value, label="error")
        nengo.Connection(learning, error.neurons,
                         transform=[[10.0]] * n_neurons, synapse=None)

        # Calculate the error (recall - value) and use it to drive PES
        nengo.Connection(stim_values, error, transform=-1, synapse=None)
        nengo.Connection(recall, error, synapse=None)
        nengo.Connection(error, conn_out.learning_rule)

        # Setup probes
        p_keys = nengo.Probe(stim_keys, synapse=None, label="p_keys")
        p_values = nengo.Probe(stim_values, synapse=None, label="p_values")
        p_learning = nengo.Probe(learning, synapse=None, label="p_learning")
        p_error = nengo.Probe(error, synapse=0.005, label="p_error")
        p_recall = nengo.Probe(recall, synapse=None, label="p_recall")

        probes = [p_keys, p_values, p_learning, p_error, p_recall]
        if record_encoders:
            p_encoders = nengo.Probe(conn_in.learning_rule, 'scaled_encoders',
                                     label="p_encoders")
            probes.append(p_encoders)

    return model, list(), dict(), probes
# NOTE(review): fragment of a larger script — `model`, `isi`, `vocab`,
# `items`, `D`, and `spa` are defined outside this chunk.
with model:
    def stim(t):
        # Present a randomly-scaled vocabulary item, switching every `isi`
        # seconds; seeding by index makes the scale reproducible per item.
        index = int(t / isi)
        rng = np.random.RandomState(seed=index)
        scale = rng.uniform(0.5, 1.0)
        return vocab.parse('%g*%s' %(scale, items[index % len(items)])).v
    # NOTE(review): this rebinds `stim` from the function to the Node that
    # wraps it — intentional but worth confirming.
    stim = nengo.Node(stim)

    intercepts = 0.0
    ens = nengo.Ensemble(n_neurons=10, dimensions=D,
                         intercepts=nengo.dists.Choice([intercepts]))
    # Encoder learning on the input connection.
    nengo.Connection(stim, ens,
                     learning_rule_type=nengo.Voja(post_tau=0.005,
                                                   learning_rate=1e-3))

    output = nengo.Node(None, size_in=D)
    # Decoder learning, starting from a null function.
    conn_out = nengo.Connection(ens, output,
                                learning_rule_type=nengo.PES(learning_rate=1e-3),
                                function=lambda x: np.zeros(D))
    # PES error signal: output - stim.
    nengo.Connection(output, conn_out.learning_rule)
    nengo.Connection(stim, conn_out.learning_rule, transform=-1)

    # Visualize the decoded output as semantic-pointer text; Direct mode
    # makes the display exact rather than a neural approximation.
    display = spa.State(D, vocab=vocab)
    for e in display.all_ensembles:
        e.neuron_type = nengo.Direct()
    nengo.Connection(output, display.input)
# Fast network that learns to predict the answer of the slow net Fast = spa.State(vocab, represent_cc_identity=False, subdimensions=subdimensions) # Create the encoding ensembles encoders = [ nengo.Ensemble(n_neurons, subdimensions, intercepts=[intercepts[i]] * n_neurons, label='Voja encoder') for i in range(int(D / subdimensions)) ] # Learn the encoders/keys voja = nengo.Voja(learning_rate=5e-3, post_synapse=None) connections_in = [ nengo.Connection(ens, encoders[i], synapse=None, learning_rule_type=voja) for i, ens in enumerate(Visual.all_ensembles) ] # Learn the decoders/values, initialized to a null function connections_out = [ nengo.Connection(encoders[i], Fast.input[i * subdimensions:(i + 1) * subdimensions], learning_rule_type=nengo.PES(1e-3), function=lambda x: np.zeros(subdimensions), synapse=None) for i in range(int(D / subdimensions))
# Create the inputs/outputs stim_keys = nengo.Node(output=cycle_array(keys, period, dt), label="stim_keys") stim_values = nengo.Node(output=cycle_array(values, period, dt), label="stim_values") learning = nengo.Node(output=lambda t: -int(t >= T / 2), label="learning") recall = nengo.Node(size_in=d_value, label="recall") # Create the memory memory = nengo.Ensemble(n_neurons, d_key, intercepts=[intercept] * n_neurons, label="memory") # Learn the encoders/keys voja = nengo.Voja(post_tau=None, learning_rate=5e-2) conn_in = nengo.Connection(stim_keys, memory, synapse=None, learning_rule_type=voja) nengo.Connection(learning, conn_in.learning_rule, synapse=None) # Learn the decoders/values, initialized to a null function conn_out = nengo.Connection(memory, recall, learning_rule_type=nengo.PES(1e-3), function=lambda x: np.zeros(d_value)) # Create the error population error = nengo.Ensemble(n_neurons, d_value, label="error") nengo.Connection(learning,
# NOTE(review): fragment of a larger script — `D`, `env`, `dim`,
# `random_inputs`, `RandomRun`, `intercept_loc_to_item`, and
# `intercept_item_to_loc` are defined outside this chunk.

# Ensemble representing the taste signal taken from the environment node.
taste = nengo.Ensemble(n_neurons=10 * D, dimensions=D, radius=1.2,
                       neuron_type=nengo.LIF())
"""
# linear and angular velocity
if random_inputs:
    velocity = nengo.Node(RandomRun())
else:
    velocity = nengo.Node([0,0])

nengo.Connection(velocity[:2], env[:2])
"""
# env dims 3+ presumably carry the taste vector — TODO confirm against the
# environment node's output layout.
nengo.Connection(env[3:], taste)

# Separate Voja rules for the two learned associative directions.
voja_loc = nengo.Voja(post_tau=None, learning_rate=5e-2)
voja_item = nengo.Voja(post_tau=None, learning_rate=5e-2)

# Memories for location->item and item->location associations; the
# intercepts control how selective each neuron is to a learned key.
memory_loc = nengo.Ensemble(n_neurons=400, dimensions=dim,
                            intercepts=[intercept_loc_to_item] * 400)
memory_item = nengo.Ensemble(n_neurons=D * 50, dimensions=D,
                             intercepts=[intercept_item_to_loc] * (D * 50))

# Query a location to get a response for what item was there
query_location = nengo.Node([0, 0])

# Location currently being visualized on the map
# Represented in encoded coordinates
mental_location = nengo.Ensemble(n_neurons=400, dimensions=dim)
# NOTE(review): the statements below are the tail of a test whose setup is
# outside this chunk — `model`, `direct_mode_ens`, and `non_direct_mode_ens`
# are defined earlier in the original file.
activate_direct_mode(model)

# Only the eligible ensembles should have been switched to Direct mode.
for ens in direct_mode_ens:
    assert type(ens.neuron_type) is nengo.Direct
for ens in non_direct_mode_ens:
    assert type(ens.neuron_type) is not nengo.Direct


@pytest.mark.parametrize(
    "learning_rule, weights",
    (
        (nengo.PES(), False),
        (nengo.BCM(), True),
        (nengo.Oja(), True),
        (nengo.Voja(), False),
    ),
)
def test_activate_direct_mode_learning(RefSimulator, learning_rule, weights):
    """Networks with learning rules still build and run after
    activate_direct_mode; weight solvers are paired with the rules that
    require them (BCM/Oja) and omitted otherwise (PES/Voja)."""
    with nengo.Network() as model:
        pre = nengo.Ensemble(10, 1)
        post = nengo.Ensemble(10, 1)
        conn = nengo.Connection(pre, post,
                                solver=nengo.solvers.LstsqL2(weights=weights))
        conn.learning_rule_type = learning_rule

    activate_direct_mode(model)

    # Smoke test: the modified model must still simulate without error.
    with RefSimulator(model) as sim:
        sim.run(0.01)
# NOTE(review): the statements below are the tail of a test whose setup is
# outside this chunk — `model`, `direct_mode_ens`, `non_direct_pre`,
# `non_direct_post`, and `non_direct_probe` are defined earlier in the
# original file. (This snippet duplicates the test above; it appears to come
# from a different revision of the same file.)
nengo.Connection(direct_mode_ens[0], direct_mode_ens[1])
nengo.Connection(non_direct_pre.neurons[0], direct_mode_ens[0])
nengo.Connection(direct_mode_ens[1], non_direct_post.neurons[0])
nengo.Probe(non_direct_probe.neurons)

activate_direct_mode(model)

# Only the eligible ensembles should have been switched to Direct mode.
for ens in direct_mode_ens:
    assert type(ens.neuron_type) is nengo.Direct
for ens in non_direct_mode_ens:
    assert type(ens.neuron_type) is not nengo.Direct


@pytest.mark.parametrize('learning_rule, weights',
                         ((nengo.PES(), False),
                          (nengo.BCM(), True),
                          (nengo.Oja(), True),
                          (nengo.Voja(), False)))
def test_activate_direct_mode_learning(RefSimulator, learning_rule, weights):
    """Networks with learning rules still build and run after
    activate_direct_mode; weight solvers are paired with the rules that
    require them (BCM/Oja) and omitted otherwise (PES/Voja)."""
    with nengo.Network() as model:
        pre = nengo.Ensemble(10, 1)
        post = nengo.Ensemble(10, 1)
        conn = nengo.Connection(pre, post,
                                solver=nengo.solvers.LstsqL2(weights=weights))
        conn.learning_rule_type = learning_rule

    activate_direct_mode(model)

    # Smoke test: the modified model must still simulate without error.
    with RefSimulator(model) as sim:
        sim.run(0.01)