Пример #1
0
def test_learningrule_attr(seed):
    """Test learning_rule attribute on Connection"""

    def assert_rule_matches(rule, conn, rule_type):
        assert rule.connection is conn
        assert rule.learning_rule_type is rule_type

    with nengo.Network(seed=seed):
        ens_a, ens_b, ens_e = (nengo.Ensemble(10, 2) for _ in range(3))
        weights = np.ones((10, 10))

        # A single rule type -> learning_rule is a single LearningRule.
        pes = PES()
        conn_scalar = nengo.Connection(
            ens_a.neurons, ens_b.neurons, learning_rule_type=pes)
        assert_rule_matches(conn_scalar.learning_rule, conn_scalar, pes)

        # A list of rule types -> learning_rule mirrors the list.
        rule_list = [PES(), BCM()]
        conn_list = nengo.Connection(
            ens_a.neurons, ens_b.neurons,
            learning_rule_type=rule_list, transform=weights)
        assert isinstance(conn_list.learning_rule, list)
        for rule, rule_type in zip(conn_list.learning_rule, rule_list):
            assert_rule_matches(rule, conn_list, rule_type)

        # A dict of rule types -> learning_rule mirrors the dict.
        rule_dict = dict(oja=Oja(), bcm=BCM())
        conn_dict = nengo.Connection(
            ens_a.neurons, ens_b.neurons,
            learning_rule_type=rule_dict, transform=weights)
        assert isinstance(conn_dict.learning_rule, dict)
        assert set(conn_dict.learning_rule) == set(rule_dict)  # assert same keys
        for key, rule_type in rule_dict.items():
            assert_rule_matches(conn_dict.learning_rule[key], conn_dict,
                                rule_type)
Пример #2
0
def test_slicing(Simulator, seed, allclose):
    """Each slice of a learning rule can be driven by its own error signal."""
    with nengo.Network(seed=seed) as model:
        pre = nengo.Ensemble(50, 1)
        post = nengo.Ensemble(30, 2)
        learned = nengo.Connection(pre,
                                   post,
                                   learning_rule_type=PES(),
                                   function=lambda x: (0, 0))
        nengo.Connection(nengo.Node(1.0), pre)

        # Dimension 0 is pushed toward 0.75 ...
        err_a = nengo.Node(lambda t, x: x - 0.75, size_in=1)
        nengo.Connection(post[0], err_a)
        nengo.Connection(err_a, learned.learning_rule[0])

        # ... and dimension 1 toward -0.5, via independent rule slices.
        err_b = nengo.Node(lambda t, x: x + 0.5, size_in=1)
        nengo.Connection(post[1], err_b)
        nengo.Connection(err_b, learned.learning_rule[1])

        probe = nengo.Probe(post, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(1.0)

    settled = sim.trange() > 0.8
    assert allclose(sim.data[probe][settled, 0], 0.75, atol=0.15)
    assert allclose(sim.data[probe][settled, 1], -0.5, atol=0.15)
Пример #3
0
    def __init__(self, *args, **kwargs):
        """Build feedback-alignment learners on this network's connections.

        Pops FA-specific options from ``kwargs`` before delegating to the
        base constructor:
            eta: learning rate for the DeltaRule/PES learners (default 1e-2).
            b_kind, b_normkind, b_scale: forwarded to ``initial_w`` to draw
                the fixed random feedback matrix ``B`` for each connection.

        All hidden connections get a DeltaRule modulated by the LIF response
        derivative; the final connection gets a plain PES learner.
        """
        eta = kwargs.pop('eta', 1e-2)
        b_kind = kwargs.pop('b_kind', 'ortho')
        b_normkind = kwargs.pop('b_normkind', None)
        b_scale = kwargs.pop('b_scale', 1.)
        super(FASkipNetwork, self).__init__(*args, **kwargs)

        # Dimensionality of the network output (and thus of the error signal).
        dout = self.output.output.size_out

        # step = lambda x: (x > 1).astype(x.dtype)
        # ^ TODO: need x to be seriously positive, since filtered value will never be 0.
        #     BUT it's on the input, not the output!

        # --- backwards connections
        for c in self.conns[:-1]:
            neuron_type = c.post_obj.ensemble.neuron_type
            assert isinstance(neuron_type, nengo.LIF)
            # Default arguments bind this connection's neuron parameters now,
            # so each loop iteration gets its own closure (late-binding-safe).
            def df(j, a=neuron_type.amplitude, tau_rc=neuron_type.tau_rc,
                   tau_ref=neuron_type.tau_ref):
                return a * dliflinear(j, tau_rc, tau_ref)

            c.learning_rule_type = DeltaRule(learning_rate=eta, post_fn=df)

            # Fixed random feedback matrix projecting the global error onto
            # this connection's learning rule (feedback alignment).
            B = initial_w((dout, c.post.size_in),
                          kind=b_kind, normkind=b_normkind, scale=b_scale)
            nengo.Connection(self.error.output, c.learning_rule, transform=B.T)

        # --- output (shallow) connection learns directly from the error
        c = self.conns[-1]
        # c.learning_rule_type = DeltaRule(learning_rate=lr)
        c.learning_rule_type = PES(learning_rate=eta)
        nengo.Connection(self.error.output, c.learning_rule)
Пример #4
0
def _test_pes(Simulator, nl, plt, seed,
              pre_neurons=False, post_neurons=False, weight_solver=False,
              vin=np.array([0.5, -0.5]), vout=None, n=200,
              function=None, transform=np.array(1.), rate=1e-3):
    """Drive a PES-learned connection toward ``vout`` and check convergence.

    Flags select the connection endpoints (decoded vs. raw neurons) and
    whether a weight solver is used; plots post value, error, and the PES
    correction signal.
    """
    if vout is None:
        vout = np.array(vin)

    model = nengo.Network(seed=seed)
    with model:
        model.config[nengo.Ensemble].neuron_type = nl()

        stim = nengo.Node(output=vin)
        target = nengo.Node(output=vout)
        pre_ens = nengo.Ensemble(n, dimensions=stim.size_out)
        post_ens = nengo.Ensemble(n, dimensions=stim.size_out)
        error = nengo.Ensemble(n, dimensions=target.size_out)

        nengo.Connection(stim, pre_ens)

        # Learn into a slice of post when the target is lower-dimensional.
        if target.size_out < stim.size_out:
            post_slice = post_ens[:target.size_out]
        else:
            post_slice = post_ens
        learn_pre = pre_ens.neurons if pre_neurons else pre_ens
        learn_post = post_ens.neurons if post_neurons else post_slice

        conn = nengo.Connection(learn_pre, learn_post,
                                function=function, transform=transform,
                                learning_rule_type=PES(rate))
        if weight_solver:
            conn.solver = nengo.solvers.LstsqL2(weights=True)

        # error = post - target
        nengo.Connection(target, error, transform=-1)
        nengo.Connection(post_slice, error)
        nengo.Connection(error, conn.learning_rule)

        post_p = nengo.Probe(post_slice, synapse=0.03)
        error_p = nengo.Probe(error, synapse=0.03)

        weights_p = nengo.Probe(conn, 'weights', sample_every=0.01)
        corr_p = nengo.Probe(conn.learning_rule, 'correction', synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)
    t = sim.trange()
    weights = sim.data[weights_p]

    plt.subplot(311)
    plt.plot(t, sim.data[post_p])
    plt.ylabel("Post decoded value")
    plt.subplot(312)
    plt.plot(t, sim.data[error_p])
    plt.ylabel("Error decoded value")
    plt.subplot(313)
    plt.plot(t, sim.data[corr_p] / rate)
    plt.ylabel("PES correction")
    plt.xlabel("Time (s)")

    settled = t > 0.4
    assert np.allclose(sim.data[post_p][settled], vout, atol=0.05)
    assert np.allclose(sim.data[error_p][settled], 0, atol=0.05)
    assert np.allclose(sim.data[corr_p][settled] / rate, 0, atol=0.05)
    assert not np.allclose(weights[0], weights[-1], atol=1e-5)
Пример #5
0
    def __init__(self, *args, **kwargs):
        """Attach a PES learner, driven by the error ensemble, to the output connection.

        Pops ``eta`` (learning rate, default 1e-2) from ``kwargs`` before
        delegating to the base constructor.
        """
        learning_rate = kwargs.pop('eta', 1e-2)
        super(ShallowNetwork, self).__init__(*args, **kwargs)

        out_conn = self.conns[-1]
        # A DeltaRule learner was also tried here:
        # c.learning_rule_type = DeltaRule(learning_rate=eta, post_fn=step)
        out_conn.learning_rule_type = PES(learning_rate=learning_rate)
        nengo.Connection(self.error.output, out_conn.learning_rule)
Пример #6
0
def test_frozen():
    """Test attributes inherited from FrozenObject"""
    x = PES(2e-3, 4e-3)
    y = PES(2e-3, 4e-3)
    z = PES(2e-3, 5e-3)

    # Hashing each instance is deterministic.
    for rule in (x, y, z):
        assert hash(rule) == hash(rule)

    # Equal construction parameters -> equal objects with equal hashes.
    assert x == y
    assert hash(x) == hash(y)

    # A differing parameter breaks equality (and, almost surely, the hash).
    assert x != z
    assert hash(x) != hash(z)  # not guaranteed, but highly likely
    assert y != z
    assert hash(y) != hash(z)  # not guaranteed, but highly likely

    # Frozen objects reject attribute mutation after construction.
    with pytest.raises((ValueError, RuntimeError)):
        x.learning_rate = 1e-1
Пример #7
0
def test_frozen():
    """Test attributes inherited from FrozenObject"""
    rule_a, rule_b = PES(2e-3, 4e-3), PES(2e-3, 4e-3)
    rule_c = PES(2e-3, 5e-3)

    assert hash(rule_a) == hash(rule_a)
    assert hash(rule_b) == hash(rule_b)
    assert hash(rule_c) == hash(rule_c)

    # Same construction arguments compare equal and hash alike.
    assert rule_a == rule_b
    assert hash(rule_a) == hash(rule_b)

    # A different learning parameter breaks equality for either twin;
    # hash inequality is not guaranteed, but highly likely.
    for other in (rule_a, rule_b):
        assert other != rule_c
        assert hash(other) != hash(rule_c)

    # FrozenObject forbids mutating parameters after construction.
    with pytest.raises((ValueError, RuntimeError)):
        rule_a.learning_rate = 1e-1
Пример #8
0
def test_pes_weights(Simulator, nl_nodirect, plt, seed, rng):
    """PES on a neurons->neurons connection learns a constant target vector.

    Builds the classic learning loop (pre -> post, with an error ensemble
    computing target - post) using the legacy API where the modulatory error
    connection is passed to ``PES`` directly. Checks that the decoded post
    value converges to ``learned_vector`` and that both the error and the
    rule's probed ``scaled_error`` signal decay toward zero.
    """
    n = 200
    learned_vector = [0.5, -0.5]
    rate = 10e-6

    m = nengo.Network(seed=seed)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        u = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=2)
        e = nengo.Ensemble(n, dimensions=2)

        # Small random initial weights so the post output starts near zero.
        initial_weights = rng.uniform(high=1e-3,
                                      size=(a.n_neurons, u_learned.n_neurons))

        nengo.Connection(u, a)
        err_conn = nengo.Connection(e, u_learned, modulatory=True)
        conn = nengo.Connection(a.neurons,
                                u_learned.neurons,
                                transform=initial_weights,
                                learning_rule_type=PES(err_conn, rate))

        # e = u - u_learned
        nengo.Connection(u_learned, e, transform=-1)
        nengo.Connection(u, e)

        u_learned_p = nengo.Probe(u_learned, synapse=0.05)
        e_p = nengo.Probe(e, synapse=0.05)

        # test probing rule itself
        se_p = nengo.Probe(conn.learning_rule, 'scaled_error', synapse=0.05)

    # FIX: run the simulator as a context manager so its resources are
    # released even if the run raises (consistent with the other tests).
    with Simulator(m) as sim:
        sim.run(1.)
    t = sim.trange()

    plt.subplot(311)
    plt.plot(t, sim.data[u_learned_p])
    plt.ylabel("Post decoded value")
    plt.subplot(312)
    plt.plot(t, sim.data[e_p])
    plt.ylabel("Error decoded value")
    plt.subplot(313)
    plt.plot(t, sim.data[se_p] / rate)
    plt.ylabel("PES scaled error")
    plt.xlabel("Time (s)")

    tend = t > 0.9
    assert np.allclose(sim.data[u_learned_p][tend], learned_vector, atol=0.05)
    assert np.allclose(sim.data[e_p][tend], 0, atol=0.05)
    assert np.allclose(sim.data[se_p][tend] / rate, 0, atol=0.05)
Пример #9
0
def test_pes_decoders_multidimensional(Simulator, seed, plt):
    """PES relearns a 2D->1D decoded function (sum of squares).

    Starts from an initial decoded function x[0] - x[1] and checks that the
    decoded output converges to the sum of squares of the constant input,
    driving the error ensemble to zero.
    """
    n = 200
    input_vector = [0.5, -0.5]
    learned_vector = [input_vector[0]**2 + input_vector[1]**2]

    m = nengo.Network(seed=seed)
    with m:
        u = nengo.Node(output=input_vector)
        v = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=1)
        e = nengo.Ensemble(n, dimensions=1)

        nengo.Connection(u, a)
        err_conn = nengo.Connection(e, u_learned, modulatory=True)

        # initial decoded function is x[0] - x[1]
        conn = nengo.Connection(a,
                                u_learned,
                                function=lambda x: x[0] - x[1],
                                learning_rule_type=PES(err_conn, 5e-6))

        # e = v - u_learned
        nengo.Connection(u_learned, e, transform=-1)

        # learned function is sum of squares
        nengo.Connection(v, e)

        u_learned_p = nengo.Probe(u_learned, synapse=0.1)
        e_p = nengo.Probe(e, synapse=0.1)
        dec_p = nengo.Probe(conn, 'decoders', sample_every=0.01)

    # FIX: run the simulator as a context manager so its resources are
    # released even if the run raises (consistent with the other tests).
    with Simulator(m) as sim:
        sim.run(0.5)
    t = sim.trange()

    plt.subplot(2, 1, 1)
    plt.plot(t, sim.data[u_learned_p], label="Post")
    plt.plot(t, sim.data[e_p], label="Error")
    plt.legend(loc="best", fontsize="x-small")
    plt.subplot(2, 1, 2)
    plt.plot(sim.trange(dt=0.01), sim.data[dec_p][..., 0])
    plt.title("Change in one 1D decoder")
    plt.xlabel("Time (s)")
    plt.ylabel("Decoding weight")

    tend = t > 0.4
    assert np.allclose(sim.data[u_learned_p][tend], learned_vector, atol=0.05)
    assert np.allclose(sim.data[e_p][tend], 0, atol=0.05)
Пример #10
0
def test_pes_synapse(Simulator, seed, pre_synapse, allclose):
    """The rule's 'activities' equal pre-synapse-filtered spikes, one step late."""
    rule = PES(pre_synapse=pre_synapse)

    with nengo.Network(seed=seed) as model:
        stim = nengo.Node(output=WhiteSignal(0.5, high=10))
        ens = nengo.Ensemble(100, 1)

        nengo.Connection(stim, ens, synapse=None)
        conn = nengo.Connection(ens, ens, learning_rule_type=rule)

        neuron_probe = nengo.Probe(ens.neurons, synapse=pre_synapse)
        rule_probe = nengo.Probe(conn.learning_rule, "activities")

    with Simulator(model) as sim:
        sim.run(0.5)

    # The rule sees the filtered activities delayed by exactly one timestep.
    assert allclose(sim.data[neuron_probe][1:, :], sim.data[rule_probe][:-1, :])
Пример #11
0
def test_pes_decoders(Simulator, nl_nodirect, seed, plt):
    """PES on a decoded connection learns a constant 2D target vector.

    Checks that the decoded post value converges to ``learned_vector``, the
    error decays to zero, and the probed decoders actually change.
    """
    n = 200
    learned_vector = [0.5, -0.5]

    m = nengo.Network(seed=seed)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        u = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=2)
        e = nengo.Ensemble(n, dimensions=2)

        nengo.Connection(u, a)
        # e = u - u_learned
        nengo.Connection(u_learned, e, transform=-1)
        nengo.Connection(u, e)
        e_c = nengo.Connection(e, u_learned, modulatory=True)
        conn = nengo.Connection(a, u_learned, learning_rule_type=PES(e_c))

        u_learned_p = nengo.Probe(u_learned, synapse=0.1)
        e_p = nengo.Probe(e, synapse=0.1)
        dec_p = nengo.Probe(conn, 'decoders', sample_every=0.01)

    # FIX: run the simulator as a context manager so its resources are
    # released even if the run raises (consistent with the other tests).
    with Simulator(m) as sim:
        sim.run(0.5)
    t = sim.trange()

    plt.subplot(2, 1, 1)
    plt.plot(t, sim.data[u_learned_p], label="Post")
    plt.plot(t, sim.data[e_p], label="Error")
    plt.legend(loc="best", fontsize="x-small")
    plt.subplot(2, 1, 2)
    plt.plot(sim.trange(dt=0.01), sim.data[dec_p][..., 0])
    plt.title("Change in one 2D decoder")
    plt.xlabel("Time (s)")
    plt.ylabel("Decoding weight")

    tend = t > 0.4
    assert np.allclose(sim.data[u_learned_p][tend], learned_vector, atol=0.05)
    assert np.allclose(sim.data[e_p][tend], 0, atol=0.05)
    assert not np.all(sim.data[dec_p][0] == sim.data[dec_p][-1])
Пример #12
0
def _test_pes(
        Simulator,
        nl,
        plt,
        seed,
        allclose,
        pre_neurons=False,
        post_neurons=False,
        weight_solver=False,
        vin=np.array([0.5, -0.5]),
        vout=None,
        n=200,
        function=None,
        transform=np.array(1.0),
        rate=1e-3,
):
    """Drive a PES-learned connection toward ``vout`` and check convergence.

    Flags select the connection endpoints (decoded vs. raw neurons) and
    whether a weight solver is used; plots the post and error signals.
    """
    if vout is None:
        vout = np.array(vin)

    with nengo.Network(seed=seed) as net:
        net.config[nengo.Ensemble].neuron_type = nl()

        inp = nengo.Node(output=vin)
        ref = nengo.Node(output=vout)
        a = nengo.Ensemble(n, dimensions=inp.size_out)
        b = nengo.Ensemble(n, dimensions=inp.size_out)
        err = nengo.Ensemble(n, dimensions=ref.size_out)

        nengo.Connection(inp, a)

        # Learn into a slice of post when the target is lower-dimensional.
        if ref.size_out < inp.size_out:
            b_slice = b[:ref.size_out]
        else:
            b_slice = b
        learn_pre = a.neurons if pre_neurons else a
        learn_post = b.neurons if post_neurons else b_slice

        conn = nengo.Connection(
            learn_pre,
            learn_post,
            function=function,
            transform=transform,
            learning_rule_type=PES(rate),
        )
        if weight_solver:
            conn.solver = nengo.solvers.LstsqL2(weights=True)

        # err = post - target
        nengo.Connection(ref, err, transform=-1)
        nengo.Connection(b_slice, err)
        nengo.Connection(err, conn.learning_rule)

        probe_post = nengo.Probe(b_slice, synapse=0.03)
        probe_err = nengo.Probe(err, synapse=0.03)

        probe_w = nengo.Probe(conn, "weights", sample_every=0.01)

    with Simulator(net) as sim:
        sim.run(0.5)
    t = sim.trange()
    weights = sim.data[probe_w]

    plt.subplot(211)
    plt.plot(t, sim.data[probe_post])
    plt.ylabel("Post decoded value")
    plt.subplot(212)
    plt.plot(t, sim.data[probe_err])
    plt.ylabel("Error decoded value")
    plt.xlabel("Time (s)")

    settled = t > 0.4
    assert allclose(sim.data[probe_post][settled], vout, atol=0.05)
    assert allclose(sim.data[probe_err][settled], 0, atol=0.05)
    assert not allclose(weights[0], weights[-1], atol=1e-5, record_rmse=False)
# trial runs for each model
errors_iterations_mpes = []
errors_iterations_pes = []
errors_iterations_nef = []
for i in range(iterations):

    learned_model_mpes = LearningModel(neurons,
                                       dimensions,
                                       mPES(gain=gain),
                                       function_to_learn,
                                       convolve=convolve,
                                       seed=seed + i)
    control_model_pes = LearningModel(neurons,
                                      dimensions,
                                      PES(),
                                      function_to_learn,
                                      convolve=convolve,
                                      seed=seed + i)
    control_model_nef = LearningModel(neurons,
                                      dimensions,
                                      None,
                                      function_to_learn,
                                      convolve=convolve,
                                      seed=seed + i)

    print("Iteration", i)
    with nengo_dl.Simulator(learned_model_mpes, device=device) as sim_mpes:
        print("Learning network (mPES)")
        sim_mpes.run(sim_time)
    with nengo_dl.Simulator(control_model_pes, device=device) as sim_pes:
    # the matrix given to transform is the initial weights found in model.sig[conn]["weights"]
    # the initial transform has no influence on learning because it is overwritten by mPES
    # the only influence is on the very first timesteps, before the error becomes large enough
    conn = nengo.Connection(pre.neurons,
                            post.neurons,
                            transform=np.zeros(
                                (post.n_neurons, pre.n_neurons)))

    # Apply the learning rule to conn
    if learning_rule == "mPES":
        conn.learning_rule_type = mPES(noisy=noise_percent,
                                       gain=gain,
                                       seed=seed,
                                       exponent=exponent)
    if learning_rule == "PES":
        conn.learning_rule_type = PES()
    printlv2("Simulating with", conn.learning_rule_type)

    # Provide an error signal to the learning rule
    nengo.Connection(error, conn.learning_rule)

    # Compute the error signal (error = actual - target)
    nengo.Connection(post, error)

    # Subtract the target (this would normally come from some external system)
    nengo.Connection(pre, error, function=function_to_learn, transform=-1)

    # Connect the input node to ensemble pre
    nengo.Connection(input_node, pre)

    nengo.Connection(stop_learning,
Пример #15
0
    def __init__(self, *args, **kwargs):
        """Build the two-step feedback-alignment network.

        Pops FA-specific options from ``kwargs`` before delegating to the
        base constructor:
            eta: learning rate for the DeltaRule/PES learners (default 1e-2).
            b_kind, b_normkind, b_scale: forwarded to ``initial_w`` to draw
                the fixed random feedback matrix ``B`` for each connection.
            e_kind: implementation of the backward error layers, 'ensemble'
                (a single Ensemble) or 'array' (an EnsembleArray); only used
                when ``self.n_error`` is not None (otherwise plain Nodes).

        Raises:
            ValueError: if ``e_kind`` is not a recognized option.
        """
        eta = kwargs.pop('eta', 1e-2)
        b_kind = kwargs.pop('b_kind', 'ortho')
        b_normkind = kwargs.pop('b_normkind', None)
        b_scale = kwargs.pop('b_scale', 1.)
        e_kind = kwargs.pop('e_kind', 'ensemble')
        super(FATwoStepNetwork, self).__init__(*args, **kwargs)

        # Dimensionality of the network output (and thus of the error signal).
        dout = self.output.output.size_out
        n_error = self.n_error

        with self:
            # --- backwards error layers, one per forward layer
            labels = ['elayer%d' % i for i in range(len(self.layers))]
            if n_error is None:
                self.elayers = [EAIO(nengo.Node, size_in=dout, label=label)
                                for label in labels]
            elif e_kind == 'ensemble':
                # FIX: was `self.e_kind`, but e_kind is a local popped from
                # kwargs above (the 'array' branch already used the local).
                self.elayers = [EAIO(
                    nengo.Ensemble, n_error*dout, dout, label=label,
                    encoders=self.e_encoders, intercepts=self.e_intercepts,
                    max_rates=self.e_rates)
                                for label in labels]
            elif e_kind == 'array':
                self.elayers = [nengo.networks.EnsembleArray(
                    n_error, dout, label=label, encoders=self.e_encoders,
                    intercepts=self.e_intercepts, max_rates=self.e_rates)
                                for label in labels]
            else:
                # Previously an unknown e_kind left self.elayers unset and
                # failed later with AttributeError; fail fast instead.
                raise ValueError("Unrecognized e_kind: %r" % (e_kind,))

            self.elps = [nengo.Probe(elayer.output, **self.pargs)
                         for elayer in self.elayers]

        # --- backwards (deep) connections: chain the error through the
        #     backward layers, deepest-first.
        error_layers = [self.error] + self.elayers[::-1]
        for e0, e1 in zip(error_layers, error_layers[1:]):
            nengo.Connection(e0.output, e1.input)
            # nengo.Connection(e0.output, e1.input, transform=initial_w((dout, dout), kind='ortho'))

        for c, e in zip(self.conns, self.elayers):
            neuron_type = c.post_obj.ensemble.neuron_type

            # Derivative taken on the neuron *input*; a step derivative with
            # damplitude=0.33 worked best among the alternatives tried:
            # post_fn = deltarule_df('liflinear', neuron_type, post_target=post_target)
            # post_fn = deltarule_df('step', neuron_type, post_target=post_target, damplitude=0.5)
            # post_fn = deltarule_df('step', neuron_type, post_target=post_target, damplitude=0.25)
            # Derivative on the neuron *output* was also tried, e.g.:
            # post_fn = deltarule_df('step', neuron_type, post_target='out',
            #                        threshold=np.exp(-6)/post_tau, damplitude=0.33)
            print("Derivative on neuron input")
            post_target = 'in'
            post_tau = 0.005
            post_fn = deltarule_df('step', neuron_type, post_target=post_target, damplitude=0.33)

            c.learning_rule_type = DeltaRule(
                learning_rate=eta, post_target=post_target, post_fn=post_fn,
                post_tau=post_tau)

            # Fixed random feedback matrix projecting each backward error
            # layer onto its connection's learning rule (feedback alignment).
            B = initial_w((dout, c.post.size_in),
                          kind=b_kind, normkind=b_normkind, scale=b_scale)
            nengo.Connection(e.output, c.learning_rule, transform=B.T)

        # --- output (shallow) connection learns directly from the error
        c = self.conns[-1]
        # c.learning_rule_type = DeltaRule(learning_rate=lr)
        c.learning_rule_type = PES(learning_rate=eta)
        nengo.Connection(self.error.output, c.learning_rule)