Example no. 1
def test_null_error():
    with nengo.Network():
        a = nengo.Ensemble(1, 1)
        b = nengo.Ensemble(1, 1)

        # works with a decoded connection (since we'll be generating weights as
        # part of the decoding process)
        nengo.Connection(a, b, learning_rule_type=nengo.PES(), transform=None)

        # error on neuron connection for decoder learning rule
        with pytest.raises(ValidationError, match="does not have weights"):
            nengo.Connection(
                a.neurons, b, learning_rule_type=nengo.PES(), transform=None
            )

        # works for decoded connection with solver.weights=True
        nengo.Connection(
            a,
            b,
            solver=nengo.solvers.LstsqL2(weights=True),
            learning_rule_type=nengo.BCM(),
            transform=None,
        )

        # error on neuron connection for weights learning rule
        with pytest.raises(ValidationError, match="does not have weights"):
            nengo.Connection(
                a.neurons, b.neurons, learning_rule_type=nengo.BCM(), transform=None
            )

        # works with encoder learning rules (since they don't require a transform)
        nengo.Connection(a.neurons, b, learning_rule_type=Voja(), transform=None)
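
Taken together, the checks above say that decoder rules such as PES need a decoded connection (or solver.weights=True), while weight rules such as BCM need a full weight matrix. A minimal runnable sketch of a configuration that satisfies BCM; the names (pre, post) and the learning rate are my own assumptions, not from the test above:

# Hedged sketch: BCM applied to a neuron-to-neuron connection with an
# explicit (post_neurons x pre_neurons) weight matrix. Names are made up.
import numpy as np
import nengo

with nengo.Network(seed=0) as model:
    pre = nengo.Ensemble(50, dimensions=1)
    post = nengo.Ensemble(50, dimensions=1)
    conn = nengo.Connection(
        pre.neurons,
        post.neurons,
        transform=np.random.uniform(
            -1e-3, 1e-3, size=(post.n_neurons, pre.n_neurons)),
        learning_rule_type=nengo.BCM(learning_rate=1e-9),
    )

with nengo.Simulator(model) as sim:
    sim.run(0.1)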
Example no. 2
def test_connectionlearningruletypeparam():
    with nengo.Network():
        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(11, 1)

        with pytest.raises(ValueError):  # need a 2D transform for BCM
            nengo.Connection(a, b, learning_rule_type=nengo.BCM())

        with pytest.raises(ValueError):  # transform must be correct shape
            nengo.Connection(a, b, transform=np.ones((10, 11)),
                             learning_rule_type=nengo.BCM())
Example no. 3
def test_connectionlearningruletypeparam():
    with nengo.Network():
        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(11, 1)

        with pytest.raises(
                ValidationError,
                match="can only be applied on connections to neurons"):
            nengo.Connection(a, b, learning_rule_type=nengo.BCM())

        with pytest.raises(ValidationError,
                           match="does not match expected shape"):
            nengo.Connection(a,
                             b,
                             transform=np.ones((10, 11)),
                             learning_rule_type=nengo.BCM())
Example no. 4
def test_probeable():
    net = nengo.Network()

    def check_learning_rule(learning_rule_type, expected, net=net):
        assert learning_rule_type.probeable == expected
        post = net.e if isinstance(learning_rule_type, Voja) else net.n
        transform = np.ones(
            (1, 10)) if isinstance(learning_rule_type, Voja) else 1.0
        conn = nengo.Connection(net.n,
                                post,
                                transform=transform,
                                learning_rule_type=learning_rule_type)
        assert conn.learning_rule.probeable == expected

    with net:
        net.e = nengo.Ensemble(10, 1)
        net.n = net.e.neurons
        check_learning_rule(nengo.PES(), ("error", "activities", "delta"))
        check_learning_rule(nengo.RLS(),
                            ("pre_filtered", "error", "delta", "inv_gamma"))
        check_learning_rule(
            nengo.BCM(), ("theta", "pre_filtered", "post_filtered", "delta"))
        check_learning_rule(nengo.Oja(),
                            ("pre_filtered", "post_filtered", "delta"))
        check_learning_rule(nengo.Voja(),
                            ("post_filtered", "scaled_encoders", "delta"))
Example no. 5
def test_bad_weight_learning_rule_transform_shape():
    with nengo.Network():
        ens = nengo.Ensemble(5, 1)

        with pytest.raises(ValidationError,
                           match="Transform.*post_neurons x pre_neur"):
            nengo.Connection(
                ens,
                ens.neurons,
                transform=np.ones((5, 1)),
                learning_rule_type=nengo.BCM(),
            )
Example no. 6
def test_learning_transform_shape_error(Simulator):
    with nengo.Network() as net:
        a = nengo.Ensemble(10, dimensions=2)
        b = nengo.Ensemble(10, dimensions=2)
        nengo.Connection(
            a.neurons, b.neurons, transform=1, learning_rule_type=nengo.BCM()
        )

    with pytest.raises(
        BuildError, match="'transform' must be a 2-dimensional array for learning"
    ):
        with Simulator(net):
            pass
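
For contrast, a hedged sketch of the shape the builder accepts, reusing the ensemble sizes of the failing network above: a dense 2-D weight matrix of shape (post.n_neurons, pre.n_neurons) instead of the scalar transform=1.

# Hedged sketch: the same network, but with a 2-D weight matrix so the
# learned transform has an explicit weight per (post, pre) neuron pair.
import numpy as np
import nengo

with nengo.Network() as net:
    a = nengo.Ensemble(10, dimensions=2)
    b = nengo.Ensemble(10, dimensions=2)
    nengo.Connection(
        a.neurons,
        b.neurons,
        transform=np.zeros((b.n_neurons, a.n_neurons)),  # (10, 10): post x pre
        learning_rule_type=nengo.BCM(),
    )

with nengo.Simulator(net):
    pass  # builds cleanly; no BuildError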
Example no. 7
    def __init__(
        self,
        source,
        target,
        rate,
    ):
        self.source = source
        self.target = target
        assert self.source.prediction_out
        assert self.target.prediction_in

        # All-to-all connection initialized with zero weights; the BCM rule
        # shapes these weights during simulation.
        connection = nengo.Connection(
            self.source.prediction_out_ens.neurons,
            self.target.prediction_in_ens.neurons,
            transform=np.zeros((self.target.prediction_in_ens.n_neurons,
                                self.source.prediction_out_ens.n_neurons)),
            learning_rule_type=nengo.BCM(learning_rate=rate),
        )
Example no. 8
def test_reset(Simulator, nl_nodirect):
    """Make sure resetting actually resets.

    A learning network on weights is used as the example network as the
    ultimate stress test; lots of weird stuff happens during learning, but
    if we're able to reset back to initial connection weights and everything
    then we're probably doing resetting right.
    """
    noise = whitenoise(0.1, 5, dimensions=2, seed=328)
    m = nengo.Network(seed=3902)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        u = nengo.Node(output=noise)
        ens = nengo.Ensemble(200, dimensions=2)
        error = nengo.Ensemble(200, dimensions=2)
        square = nengo.Ensemble(200, dimensions=2)

        nengo.Connection(u, ens)
        nengo.Connection(u, error)
        nengo.Connection(square, error, transform=-1)
        err_conn = nengo.Connection(error, square, modulatory=True)
        nengo.Connection(ens,
                         square,
                         learning_rule=[nengo.PES(err_conn),
                                        nengo.BCM()],
                         solver=LstsqL2nz(weights=True))

        square_p = nengo.Probe(square, synapse=0.1)
        err_p = nengo.Probe(error, synapse=0.1)

    sim = Simulator(m)
    sim.run(0.2)
    sim.run(0.6)

    first_t = sim.trange()
    first_square_p = np.array(sim.data[square_p], copy=True)
    first_err_p = np.array(sim.data[err_p], copy=True)

    sim.reset()
    sim.run(0.8)

    assert np.all(sim.trange() == first_t)
    assert np.all(sim.data[square_p] == first_square_p)
    assert np.all(sim.data[err_p] == first_err_p)
Example no. 9

    nengo.Connection(stimD, ensPreD)


    inhibit = nengo.Node(stimCinput)
    nengo.Connection(inhibit, ensPost1.neurons, transform=np.ones((ensPost1.n_neurons, 1)) * 10, synapse=None)

    inhibit2 = nengo.Node(stimAinput)
    nengo.Connection(inhibit2, ensPost2.neurons, transform=np.ones((ensPost2.n_neurons, 1)) * 10, synapse=None)

    # how do I compute a similarity trace? (see the sketch after this snippet)
    # stop learning once the similarity crosses some threshold value

    connA = nengo.Connection(ensPreA, ensPost1,
                             solver=nengo.solvers.LstsqL2(weights=True))
    connA.learning_rule_type = nengo.BCM(learning_rate=5e-10)

    connB = nengo.Connection(ensPreB, ensPost1,
                             solver=nengo.solvers.LstsqL2(weights=True))
    connB.learning_rule_type = nengo.BCM(learning_rate=5e-10)

    connC = nengo.Connection(ensPreC, ensPost2,
                             solver=nengo.solvers.LstsqL2(weights=True))
    connC.learning_rule_type = nengo.BCM(learning_rate=5e-10)

    connD = nengo.Connection(ensPreD, ensPost2,
                             solver=nengo.solvers.LstsqL2(weights=True))
    connD.learning_rule_type = nengo.BCM(learning_rate=5e-10)
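
Regarding the similarity-trace question in the comments: one common offline approach is to probe the decoded output of the post ensemble and dot it with the target vector after the run. A sketch under assumed names (post, target_vector and the run time are mine, not from the model above):

# Hedged sketch: compute a similarity trace offline from a probe.
import numpy as np
import nengo

target_vector = np.array([1.0, 0.0])  # hypothetical learned target

with nengo.Network() as model:
    post = nengo.Ensemble(100, dimensions=2)
    post_p = nengo.Probe(post, synapse=0.05)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

# One similarity value per timestep: dot product of the decoded state
# with the target vector.
similarity = np.dot(sim.data[post_p], target_vector)

Gating the learning once this trace crosses a threshold would additionally require routing the similarity (or an inhibitory signal derived from it) back into the running model.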
Example no. 10
        nengo.Connection(direct_mode_ens[1], non_direct_post.neurons[0])
        nengo.Probe(non_direct_probe.neurons)

    activate_direct_mode(model)

    for ens in direct_mode_ens:
        assert type(ens.neuron_type) is nengo.Direct
    for ens in non_direct_mode_ens:
        assert type(ens.neuron_type) is not nengo.Direct


@pytest.mark.parametrize(
    "learning_rule, weights",
    (
        (nengo.PES(), False),
        (nengo.BCM(), True),
        (nengo.Oja(), True),
        (nengo.Voja(), False),
    ),
)
def test_activate_direct_mode_learning(RefSimulator, learning_rule, weights):
    with nengo.Network() as model:
        pre = nengo.Ensemble(10, 1)
        post = nengo.Ensemble(10, 1)
        conn = nengo.Connection(pre,
                                post,
                                solver=nengo.solvers.LstsqL2(weights=weights))
        conn.learning_rule_type = learning_rule

    activate_direct_mode(model)
Example no. 11
    # Square
    nengo.Connection(pre, error, function=lambda x: x**2, transform=-1)
    # Communication channel
    # nengo.Connection(pre, error, transform=-1, synapse=0.02)
    nengo.Connection(post, error, transform=1, synapse=0.02)

    # Connecting the pre population to the post population (communication channel)
    conn = nengo.Connection(pre,
                            post,
                            function=lambda x: np.random.random(1),
                            solver=nengo.solvers.LstsqL2(weights=True))

    # Adding the learning rules to the connection
    conn.learning_rule_type = {
        'my_pes': nengo.PES(learning_rate=1e-3),
        'my_bcm': nengo.BCM()
    }

    # Error connections don't impart current
    error_conn = nengo.Connection(error, conn.learning_rule['my_pes'])

    # Providing input to the model
    input = nengo.Node(WhiteSignal(30, high=10))  # RMS = 0.5 by default
    # Connecting input to the pre ensemble
    nengo.Connection(input, pre, synapse=0.02)

    # Function to inhibit the error population after 15 seconds
    def inhib(t):
        return 2.0 if t > 15.0 else 0.0

    # Connecting the inhibit node to the error population (snippet is cut off here; see the sketch below)
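
The snippet stops at the inhibitory connection. A hedged, self-contained sketch of the usual pattern (the names and the -2.5 gain are my own assumptions): a Node drives a strongly negative current into error.neurons, silencing the error population so the PES component stops changing the weights. Note that only PES is gated this way; BCM is unsupervised and keeps adapting regardless of the error signal.

# Hedged sketch: silence an error ensemble with a time-gated inhibitory node.
import numpy as np
import nengo

with nengo.Network() as model:
    error = nengo.Ensemble(60, dimensions=1)
    inhibit_learning = nengo.Node(lambda t: 2.0 if t > 15.0 else 0.0)
    nengo.Connection(
        inhibit_learning,
        error.neurons,
        transform=-2.5 * np.ones((error.n_neurons, 1)),
        synapse=0.01,
    )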
Example no. 12
        nengo.Connection(direct_mode_ens[0], direct_mode_ens[1])

        nengo.Connection(non_direct_pre.neurons[0], direct_mode_ens[0])
        nengo.Connection(direct_mode_ens[1], non_direct_post.neurons[0])
        nengo.Probe(non_direct_probe.neurons)

    activate_direct_mode(model)

    for ens in direct_mode_ens:
        assert type(ens.neuron_type) is nengo.Direct
    for ens in non_direct_mode_ens:
        assert type(ens.neuron_type) is not nengo.Direct


@pytest.mark.parametrize('learning_rule, weights',
                         ((nengo.PES(), False), (nengo.BCM(), True),
                          (nengo.Oja(), True), (nengo.Voja(), False)))
def test_activate_direct_mode_learning(RefSimulator, learning_rule, weights):
    with nengo.Network() as model:
        pre = nengo.Ensemble(10, 1)
        post = nengo.Ensemble(10, 1)
        conn = nengo.Connection(pre,
                                post,
                                solver=nengo.solvers.LstsqL2(weights=weights))
        conn.learning_rule_type = learning_rule

    activate_direct_mode(model)

    with RefSimulator(model) as sim:
        sim.run(0.01)
Example no. 13
        u_learned_p = nengo.Probe(u_learned, synapse=0.1)
        e_p = nengo.Probe(e, synapse=0.1)

    sim = Simulator(m)
    sim.run(1.)

    assert np.allclose(sim.data[u_learned_p][-1], learned_vector, atol=0.05)
    assert np.allclose(sim.data[e_p][-1],
                       np.zeros(len(learned_vector)),
                       atol=0.05)


@pytest.mark.parametrize(
    'learning_rule',
    [nengo.BCM(), nengo.Oja(), [nengo.Oja(), nengo.BCM()]])
def test_unsupervised_learning_rule(Simulator, nl_nodirect, learning_rule):
    n = 200
    learned_vector = [0.5, -0.5]

    m = nengo.Network(seed=3902)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        u = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=2)

        initial_weights = np.random.random((a.n_neurons, u_learned.n_neurons))

        nengo.Connection(u, a)
        nengo.Connection(a.neurons,
                         u_learned.neurons,
                         transform=initial_weights,
                         learning_rule_type=learning_rule)
Example no. 14

    connection = nengo.Connection(
        A.neurons,
        B.neurons,
        transform=np.zeros((B.n_neurons, A.n_neurons)),
        learning_rule_type=nengo.Oja(
            learning_rate=1e-8,
            # beta=0
            # max_weight=.1,
            # min_weight=-.1,
            # bounds="none",
        ),
        # learning_rule_type=nengo.BCM(
        # learning_rate=5e-8,
        # ),
    )

    connection = nengo.Connection(
        A.neurons,
        B.neurons,
        transform=np.zeros((B.n_neurons, A.n_neurons)),
        learning_rule_type=nengo.BCM(
            learning_rate=5e-11,
            # beta=0
            # max_weight=.1,
            # min_weight=-.1,
            # bounds="none",
        ),
        # learning_rule_type=nengo.BCM(
        # learning_rate=5e-8,
        # ),
    )
Example no. 15

vocabMOTOR.populate('RIGHT; LEFT; NOTHING')

with spa.Network(seed=100) as model:

    exp = Timing(trial_duration, cue_length, target_length, tar_ON)

    # define population
    target = spa.State(D, label='target')
    motor = spa.State(D, feedback=0.9, label='motor')
    pfcCUE = spa.State(D, feedback=0.9, label='pfcCUE')
    pfcRULE = spa.State(D, feedback=0.1, label='pfcRULE')
    md = spa.State(D, feedback=0, label='md')

    # connections
    conn = nengo.Connection(md.output, pfcRULE.input, synapse=0.05)
    conn.learning_rule_type = nengo.BCM(learning_rate=1e-9)

    # define inputs
    stim_cue = spa.Transcode(function=exp.presentCues,
                             output_vocab=pfcCUE.vocab,
                             label='stim Cue')
    stim_cue >> pfcCUE
    stim_target = spa.Transcode(function=exp.presentTargets,
                                output_vocab=target.vocab,
                                label='stim Target')
    stim_target >> target

    with spa.ActionSelection():
        spa.ifmax(0.3 * spa.dot(s.NOTHING, pfcCUE), s.NOTHING >> pfcRULE)

        spa.ifmax(spa.dot(s.CUE_A + s.CUE_C, pfcCUE), s.VIS >> md,
Example no. 16
# In[ ]:

# Verify that it does a communication channel
with nengo.Simulator(model) as sim:
    sim.run(2.0)

plt.plot(sim.trange(), sim.data[pre_p], label="Pre")
plt.plot(sim.trange(), sim.data[post_p], label="Post")
plt.ylabel("Decoded value")
plt.legend(loc="best")

# ## What does BCM do?

# In[ ]:

conn.learning_rule_type = nengo.BCM(learning_rate=5e-10)
with model:
    weights_p = nengo.Probe(conn, 'weights', synapse=0.01, sample_every=0.01)

# In[ ]:

with nengo.Simulator(model) as sim:
    sim.run(20.0)

# In[ ]:

plt.figure(figsize=(12, 8))
plt.subplot(2, 1, 1)
plt.plot(sim.trange(), sim.data[pre_p], label="Pre")
plt.plot(sim.trange(), sim.data[post_p], label="Post")
plt.ylabel("Decoded value")