def test_null_error():
    """Check which connection/learning-rule combinations accept transform=None."""
    with nengo.Network():
        a = nengo.Ensemble(1, 1)
        b = nengo.Ensemble(1, 1)

        # A decoded connection is fine: weights are produced by the decoding
        # process regardless of the transform.
        nengo.Connection(a, b, transform=None, learning_rule_type=nengo.PES())

        # A decoder learning rule on a neuron-level connection must fail.
        with pytest.raises(ValidationError, match="does not have weights"):
            nengo.Connection(
                a.neurons, b, transform=None, learning_rule_type=nengo.PES()
            )

        # With solver.weights=True, a decoded connection is acceptable for a
        # weight-based learning rule.
        nengo.Connection(
            a,
            b,
            transform=None,
            solver=nengo.solvers.LstsqL2(weights=True),
            learning_rule_type=nengo.BCM(),
        )

        # A weight learning rule on a neuron-to-neuron connection must fail.
        with pytest.raises(ValidationError, match="does not have weights"):
            nengo.Connection(
                a.neurons, b.neurons, transform=None, learning_rule_type=nengo.BCM()
            )

        # Encoder learning rules do not require a transform, so this is fine.
        nengo.Connection(a.neurons, b, transform=None, learning_rule_type=Voja())
# Beispiel #2 (0) — scrape artifact between pasted examples; commented so the file parses
def test_voja_modulate(Simulator, nl_nodirect, seed):
    """Tests that voja's rule can be modulated on/off."""
    n_neurons = 200
    target = np.asarray([0.5])

    def gate(t):
        """Learning signal: on (0) for the first 0.5s, then off (-1)."""
        return -1 if t >= 0.5 else 0

    net = nengo.Network(seed=seed)
    with net:
        net.config[nengo.Ensemble].neuron_type = nl_nodirect()
        control = nengo.Node(output=gate)
        stim = nengo.Node(output=target)
        ens = nengo.Ensemble(n_neurons, dimensions=len(target))

        conn = nengo.Connection(
            stim, ens, synapse=None, learning_rule_type=Voja(None)
        )
        nengo.Connection(control, conn.learning_rule, synapse=None)

        p_enc = nengo.Probe(conn.learning_rule, 'scaled_encoders')

    with Simulator(net) as sim:
        sim.run(1.0)
    after = sim.trange() > 0.5

    # Once learning is gated off (t > 0.5), the encoders must stay frozen.
    assert np.allclose(sim.data[p_enc][after], sim.data[p_enc][-1])

    # While learning was on, the encoders must actually have moved.
    first_after = np.where(after)[0][0]  # first time step past the changeover
    assert not np.allclose(sim.data[p_enc][0], sim.data[p_enc][first_after])
# Beispiel #3 (0) — scrape artifact between pasted examples; commented so the file parses
def test_voja_encoders(Simulator, nl_nodirect, rng, seed):
    """Tests that voja changes active encoders to the input."""
    n = 200
    learned_vector = np.asarray([0.3, -0.4, 0.6])
    learned_vector /= np.linalg.norm(learned_vector)
    n_change = n // 2  # only the first half of the encoders should move

    # First half: intercepts of -1 (always firing) with random encoders.
    # Second half: encoders anti-aligned with the input plus near-unity
    # intercepts, so those neurons never fire.
    intercepts = np.asarray([-1] * n_change + [0.99] * (n - n_change))
    rand_encoders = UniformHypersphere(surface=True).sample(
        n_change, len(learned_vector), rng=rng)
    fixed_encoders = np.tile(-learned_vector, (n - n_change, 1))
    encoders = np.concatenate([rand_encoders, fixed_encoders], axis=0)

    net = nengo.Network(seed=seed)
    with net:
        net.config[nengo.Ensemble].neuron_type = nl_nodirect()
        stim = nengo.Node(output=learned_vector)
        ens = nengo.Ensemble(
            n,
            dimensions=len(learned_vector),
            intercepts=intercepts,
            encoders=encoders,
            radius=2.0,  # non-unit radius exercises encoder scaling
        )

        conn = nengo.Connection(
            stim, ens, synapse=None, learning_rule_type=Voja(learning_rate=1e-1)
        )
        p_enc = nengo.Probe(conn.learning_rule, 'scaled_encoders')
        p_enc_ens = nengo.Probe(ens, 'scaled_encoders')

    with Simulator(net) as sim:
        sim.run(1.0)
    tend = sim.trange() > 0.5

    # Voja's rule relies on knowing exactly how the encoders were scaled
    # during the build process, because it modifies the scaled_encoders signal
    # proportional to this factor; verify that assumption holds.
    encoder_scale = (sim.data[ens].gain / ens.radius)[:, np.newaxis]
    assert np.allclose(sim.data[ens].encoders,
                       sim.data[ens].scaled_encoders / encoder_scale)

    # The silent half must keep its encoders for the whole simulation ...
    assert np.allclose(sim.data[p_enc][0, n_change:],
                       sim.data[p_enc][:, n_change:])
    # ... and they must still equal their originally assigned value.
    assert np.allclose(
        sim.data[p_enc][0, n_change:] / encoder_scale[n_change:],
        -learned_vector)

    # The active half must have converged onto the input vector.
    assert np.allclose(sim.data[p_enc][tend, :n_change] /
                       encoder_scale[:n_change],
                       learned_vector,
                       atol=0.01)
    # Encoders probed from the ensemble must match those probed from Voja.
    assert np.allclose(sim.data[p_enc], sim.data[p_enc_ens])