Example #1
0
def test_probeable():
    """The probeable attributes advertised by each learning rule type match
    both the standalone rule instance and the rule attached to a built
    Connection."""
    net = nengo.Network()

    def check_learning_rule(learning_rule_type, expected, net=net):
        # The rule type itself advertises its probeable attributes ...
        assert learning_rule_type.probeable == expected

        # Voja adapts encoders, so it must target an Ensemble and use an
        # encoding transform; the other rules here learn on a neuron-level
        # connection with a scalar transform.
        # FIX: use the qualified ``nengo.Voja`` for consistency with the
        # rest of this test instead of relying on a direct ``Voja`` import.
        is_voja = isinstance(learning_rule_type, nengo.Voja)
        post = net.e if is_voja else net.n
        transform = np.ones((1, 10)) if is_voja else 1.0
        conn = nengo.Connection(
            net.n,
            post,
            transform=transform,
            learning_rule_type=learning_rule_type,
        )
        # ... and so does the rule object instantiated on the connection.
        assert conn.learning_rule.probeable == expected

    with net:
        net.e = nengo.Ensemble(10, 1)
        net.n = net.e.neurons
        check_learning_rule(nengo.PES(), ("error", "activities", "delta"))
        check_learning_rule(
            nengo.RLS(), ("pre_filtered", "error", "delta", "inv_gamma"))
        check_learning_rule(
            nengo.BCM(), ("theta", "pre_filtered", "post_filtered", "delta"))
        check_learning_rule(
            nengo.Oja(), ("pre_filtered", "post_filtered", "delta"))
        check_learning_rule(
            nengo.Voja(), ("post_filtered", "scaled_encoders", "delta"))
Example #2
0
def test_set_learning_rule():
    """Learning rules attach to decoded, weight-solver, and neuron-level
    connections, but a Node-sourced connection rejects PES."""
    with nengo.Network():
        a = nengo.Ensemble(10, 2)
        b = nengo.Ensemble(10, 2)

        # Decoded ensemble-to-ensemble connection.
        nengo.Connection(a, b, learning_rule_type=nengo.PES())
        # Full weight-matrix connection via a weight solver.
        nengo.Connection(
            a, b,
            learning_rule_type=nengo.PES(),
            solver=LstsqL2(weights=True),
        )
        # Direct neuron-to-neuron connections.
        nengo.Connection(a.neurons, b.neurons, learning_rule_type=nengo.PES())
        nengo.Connection(a.neurons, b.neurons, learning_rule_type=nengo.Oja())

        # A Node has no decoders, so attaching PES must raise.
        node = nengo.Node(output=lambda t, x: t * x, size_in=2)
        with pytest.raises(ValueError):
            nengo.Connection(node, a, learning_rule_type=nengo.PES())
Example #3
0
def test_unsupervised_learning_rule(Simulator, nl_nodirect, learning_rule):
    """The parametrized unsupervised rule builds and runs on a full
    neuron-to-neuron weight connection."""
    n = 200
    learned_vector = [0.5, -0.5]

    m = nengo.Network(seed=3902)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        u = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=2)

        # Random dense weight matrix for the neuron-level connection.
        initial_weights = np.random.random((a.n_neurons, u_learned.n_neurons))

        nengo.Connection(u, a)
        # BUG FIX: the Connection keyword is ``learning_rule_type``
        # (``learning_rule`` is a read-only attribute, so the original call
        # would fail), and the parametrized ``learning_rule`` argument was
        # previously ignored in favor of a hard-coded ``nengo.Oja()``.
        nengo.Connection(a.neurons,
                         u_learned.neurons,
                         transform=initial_weights,
                         learning_rule_type=learning_rule)

    sim = Simulator(m)
    sim.run(1.)
Example #4
0
def test_set_learning_rule():
    """Valid learning-rule attachments succeed; attaching PES to a
    connection originating from a Node raises a ValidationError."""
    with nengo.Network():
        a = nengo.Ensemble(10, 2)
        b = nengo.Ensemble(10, 2)

        # Decoded ensemble-to-ensemble connection.
        nengo.Connection(a, b, learning_rule_type=nengo.PES())
        # Full weight-matrix connection through a weight solver.
        nengo.Connection(
            a, b, solver=LstsqL2(weights=True), learning_rule_type=nengo.PES()
        )
        # Neuron-to-neuron connections require an explicit transform.
        nengo.Connection(
            a.neurons, b.neurons, transform=1, learning_rule_type=nengo.PES()
        )
        nengo.Connection(
            a.neurons,
            b.neurons,
            transform=np.ones((10, 10)),
            learning_rule_type=nengo.Oja(),
        )

        # PES needs decoders, so a Node pre-object is rejected at build.
        node = nengo.Node(output=lambda t, x: t * x, size_in=2)
        with pytest.raises(
            ValidationError, match="'pre' must be of type 'Ensemble'.*PES"
        ):
            nengo.Connection(node, a, learning_rule_type=nengo.PES())
Example #5
0
        nengo.Probe(non_direct_probe.neurons)

    activate_direct_mode(model)

    for ens in direct_mode_ens:
        assert type(ens.neuron_type) is nengo.Direct
    for ens in non_direct_mode_ens:
        assert type(ens.neuron_type) is not nengo.Direct


@pytest.mark.parametrize(
    "learning_rule, weights",
    (
        (nengo.PES(), False),
        (nengo.BCM(), True),
        (nengo.Oja(), True),
        (nengo.Voja(), False),
    ),
)
def test_activate_direct_mode_learning(RefSimulator, learning_rule, weights):
    with nengo.Network() as model:
        pre = nengo.Ensemble(10, 1)
        post = nengo.Ensemble(10, 1)
        conn = nengo.Connection(pre,
                                post,
                                solver=nengo.solvers.LstsqL2(weights=weights))
        conn.learning_rule_type = learning_rule

    activate_direct_mode(model)

    with RefSimulator(model) as sim:
Example #6
0
        nengo.Connection(direct_mode_ens[0], direct_mode_ens[1])

        nengo.Connection(non_direct_pre.neurons[0], direct_mode_ens[0])
        nengo.Connection(direct_mode_ens[1], non_direct_post.neurons[0])
        nengo.Probe(non_direct_probe.neurons)

    activate_direct_mode(model)

    for ens in direct_mode_ens:
        assert type(ens.neuron_type) is nengo.Direct
    for ens in non_direct_mode_ens:
        assert type(ens.neuron_type) is not nengo.Direct


@pytest.mark.parametrize(
    "learning_rule, weights",
    [
        (nengo.PES(), False),
        (nengo.BCM(), True),
        (nengo.Oja(), True),
        (nengo.Voja(), False),
    ],
)
def test_activate_direct_mode_learning(RefSimulator, learning_rule, weights):
    """A network with a learning rule still builds and runs after
    activate_direct_mode, for both decoded and weight connections."""
    with nengo.Network() as model:
        pre = nengo.Ensemble(10, 1)
        post = nengo.Ensemble(10, 1)
        solver = nengo.solvers.LstsqL2(weights=weights)
        conn = nengo.Connection(pre, post, solver=solver)
        conn.learning_rule_type = learning_rule

    activate_direct_mode(model)

    with RefSimulator(model) as sim:
        sim.run(0.01)
Example #7
0
        u_learned_p = nengo.Probe(u_learned, synapse=0.1)
        e_p = nengo.Probe(e, synapse=0.1)

    sim = Simulator(m)
    sim.run(1.)

    assert np.allclose(sim.data[u_learned_p][-1], learned_vector, atol=0.05)
    assert np.allclose(sim.data[e_p][-1],
                       np.zeros(len(learned_vector)),
                       atol=0.05)


@pytest.mark.parametrize(
    'learning_rule',
    [nengo.BCM(), nengo.Oja(), [nengo.Oja(), nengo.BCM()]])
def test_unsupervised_learning_rule(Simulator, nl_nodirect, learning_rule):
    n = 200
    learned_vector = [0.5, -0.5]

    m = nengo.Network(seed=3902)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        u = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=2)

        initial_weights = np.random.random((a.n_neurons, u_learned.n_neurons))

        nengo.Connection(u, a)
        nengo.Connection(a.neurons,
# Collect the dataset into a plain list for PresentInput below.
# NOTE(review): assumes `mnist` yields flattened image vectors and that
# `training_images` was initialized earlier — confirm upstream.
for image in mnist:
    training_images.append(image)

# Build a Nengo SNN: input node -> input layer -> hidden layer -> output,
# with Oja learning on the last two connections.
model = nengo.Network()
with model:
    # Present each training image as the node's output for 0.1 s apiece.
    stim = nengo.Node(nengo.processes.PresentInput(training_images, 0.1))

    # Input layer: one neuron per pixel (stim drives a.neurons directly
    # below, so the 784-dim input maps one value per neuron).
    a = nengo.Ensemble(n_neurons=784, dimensions=1)

    # Hidden layer.
    b = nengo.Ensemble(n_neurons=1000, dimensions=1)

    # Output layer: presumably one neuron per digit class (0-9) —
    # TODO confirm against the labels used elsewhere.
    output = nengo.Ensemble(n_neurons=10, dimensions=1)

    # Inject the pixel values straight into the input-layer neurons.
    nengo.Connection(stim, a.neurons)

    # Weight-matrix connections (weights=True) so the Oja rule can adapt
    # the full neuron-to-neuron weight matrices during simulation.
    conn_ab = nengo.Connection(a, b, solver=nengo.solvers.LstsqL2(weights=True))
    conn_ab.learning_rule_type = nengo.Oja(learning_rate=6e-8)

    conn_boutput = nengo.Connection(b, output, solver=nengo.solvers.LstsqL2(weights=True))
    conn_boutput.learning_rule_type = nengo.Oja(learning_rate=6e-8)

    # Probes: filtered input/output activity plus the evolving weights of
    # the hidden->output connection, sampled every 10 ms.
    pre_p = nengo.Probe(a, synapse=0.01)
    post_p = nengo.Probe(output, synapse=0.01)
    weights_p = nengo.Probe(conn_boutput, 'weights', synapse=0.01, sample_every=0.01)
    B_f = Cycler(np.roll(SPs, -1), .5, 1, SP_duration,
                 n_SPs * n_training_cycles).make_step()
    B = nengo.Ensemble(30 * D, D, intercepts=intercepts)
    B_inp = spa.Transcode(B_f, output_vocab=vocab)
    nengo.Connection(B_inp.output, B)
    B_out = spa.Transcode(input_vocab=vocab, output_vocab=vocab)
    nengo.Connection(B, B_out.input)

    connection = nengo.Connection(
        A.neurons,
        B.neurons,
        transform=np.zeros((B.n_neurons, A.n_neurons)),
        learning_rule_type=nengo.Oja(
            learning_rate=1e-8,
            # beta=0
            # max_weight=.1,
            # min_weight=-.1,
            # bounds="none",
        ),
        # learning_rule_type=nengo.BCM(
        # learning_rate=5e-8,
        # ),
    )

    connection = nengo.Connection(
        A.neurons,
        B.neurons,
        transform=np.zeros((B.n_neurons, A.n_neurons)),
        learning_rule_type=nengo.BCM(
            learning_rate=5e-11,
            # beta=0
    n = v.shape[0]
    k = np.arange(n) + 1
    l1norm = np.sum(v)
    summation = np.sum((v / l1norm) * ((n - k + 0.5) / n))
    return 1 - 2 * summation


# Compare the weight sparsity at the first and last probe samples.
start_sparsity = sparsity_measure(sim.data[weights_p][0])
end_sparsity = sparsity_measure(sim.data[weights_p][-1])
print("Starting sparsity: {0}".format(start_sparsity))
print("Ending sparsity: {0}".format(end_sparsity))

# ## What does Oja do?

# In[ ]:

# Switch the connection over to the Oja rule and rerun the simulation.
conn.learning_rule_type = nengo.Oja(learning_rate=6e-8)

# In[ ]:

with nengo.Simulator(model) as sim:
    sim.run(20.0)

# In[ ]:

# Plot the decoded pre/post signals over the full simulation.
t = sim.trange()
plt.figure(figsize=(12, 8))
plt.subplot(2, 1, 1)
plt.plot(t, sim.data[pre_p], label="Pre")
plt.plot(t, sim.data[post_p], label="Post")
plt.ylabel("Decoded value")
plt.ylim(-1.6, 1.6)
plt.legend(loc="lower left")