Exemple #1
0
def test_balreal():
    """Balanced transformation diagonalizes both Gramians to the HSVs."""
    mix = 0.8
    target = Lowpass(0.05)
    distractor = 0.5 * Lowpass(0.01) + 0.5 * Alpha(0.005)
    sys = mix * target + (1 - mix) * distractor

    T, Tinv, S = balanced_transformation(sys)
    assert np.allclose(inv(T), Tinv)
    assert np.allclose(S, hsvd(sys))

    # The balanced realization is an equivalent system.
    balsys = sys.transform(T, Tinv)
    assert balsys == sys

    # Hankel singular values: nonnegative, sorted descending, with the
    # dominant (signal) state much larger than the remaining (noise) ones.
    assert np.all(S >= 0)
    assert np.all(S[0] > 0.3)
    assert np.all(S[1:] < 0.05)
    assert np.allclose(sorted(S, reverse=True), S)

    P = control_gram(balsys)
    Q = observe_gram(balsys)

    # In the balanced realization both Gramians equal diag(S).
    offdiag_mask = ~np.eye(len(P), dtype=bool)
    for gram in (P, Q):
        assert np.allclose(np.diag(gram), S)
        assert np.allclose(gram[offdiag_mask], 0)
Exemple #2
0
def test_modred(rng):
    """Model reduction of a balanced realization keeps the dominant state."""
    dt = 0.001
    target = Lowpass(0.05)
    distractor = 0.5 * Lowpass(0.01) + 0.5 * Alpha(0.005)
    w = 0.999
    sys = w * target + (1 - w) * distractor

    T, Tinv, S = balanced_transformation(sys)
    balsys = sys.transform(T, Tinv)

    best_state = S.argmax()
    all_but_worst = list(set(range(len(sys))) - {S.argmin()})

    # Keeping just the best state should remove the 3 noise dimensions;
    # discarding only the lowest state should do at least as well.
    for keep_states in (best_state, all_but_worst):
        delsys = modred(balsys, keep_states)
        assert delsys.order_den == np.asarray(keep_states).size

        u = rng.normal(size=2000)
        assert rmse(sys.filt(u, dt), delsys.filt(u, dt)) < 1e-4

        step = np.zeros(2000)
        step[50:] = 1.0
        dcsys = modred(balsys, keep_states, method='dc')
        assert np.allclose(dcsys.dcgain, balsys.dcgain)

        # use of shift related to nengo issue #938
        assert not sys.has_passthrough
        assert dcsys.has_passthrough
        assert rmse(shift(sys.filt(step, dt)), dcsys.filt(step, dt)) < 1e-4
def test_linear_network(Simulator, plt, seed, rng, neuron_type, atol, atol_x):
    """End-to-end check of a LinearNetwork implementing Lowpass(0.1).

    Probes the subnetwork's input, state, and output, then compares the
    probed state/output against ideal responses computed offline by
    filtering the probed input. ``atol`` / ``atol_x`` bound the NRMSE of
    the output / state respectively (supplied via parametrization along
    with ``neuron_type``).
    """
    n_neurons = 500
    dt = 0.001
    T = 1.0  # simulation length in seconds

    sys = Lowpass(0.1)
    scale_input = 2.0

    synapse = 0.02
    tau_probe = 0.005

    with Network(seed=seed) as model:
        stim = nengo.Node(
            output=nengo.processes.WhiteSignal(T, high=10, seed=seed))
        subnet = LinearNetwork(sys,
                               n_neurons_per_ensemble=n_neurons,
                               synapse=synapse,
                               input_synapse=synapse,
                               dt=dt,
                               neuron_type=neuron_type)
        nengo.Connection(stim,
                         subnet.input,
                         synapse=None,
                         transform=scale_input)

        # The single synapse given above is used for both the recurrent and
        # input connections, and no synapse is placed on the output.
        assert subnet.synapse == subnet.input_synapse
        assert subnet.output_synapse is None

        p_input = nengo.Probe(subnet.input, synapse=tau_probe)
        p_x = nengo.Probe(subnet.state.output, synapse=tau_probe)
        p_output = nengo.Probe(subnet.output, synapse=tau_probe)

    with Simulator(model, dt=dt) as sim:
        sim.run(T)

    # Ideal responses from filtering the probed input; shift(...) compensates
    # for a one-step simulator offset (cf. "nengo issue #938" note elsewhere
    # in this file) — TODO confirm exact alignment semantics.
    ideal_output = shift(sys.filt(sim.data[p_input]))
    ideal_x = shift(subnet.realization.X.filt(sim.data[p_input]))

    plt.plot(sim.trange(), sim.data[p_input], label="Input", alpha=0.5)
    plt.plot(sim.trange(), sim.data[p_output], label="Actual y", alpha=0.5)
    plt.plot(sim.trange(),
             ideal_output,
             label="Expected y",
             alpha=0.5,
             linestyle='--')
    plt.plot(sim.trange(), sim.data[p_x], label="Actual x", alpha=0.5)
    plt.plot(sim.trange(),
             ideal_x,
             label="Expected x",
             alpha=0.5,
             linestyle='--')
    plt.legend()

    assert nrmse(sim.data[p_output], ideal_output) < atol
    assert nrmse(sim.data[p_x].squeeze(), ideal_x.squeeze()) < atol_x
def goTarget(f1=Lowpass(0.01),
             f2=Lowpass(0.1),
             stim=lambda t: 0,
             gating=lambda t: 0,
             N=100,
             t=10,
             dt=0.001,
             m=Uniform(30, 30),
             i=Uniform(-1, 0.6),
             kInh=-1.5,
             seed=0):
    """Simulate a gated memory network and return its probed signals.

    A feedforward ensemble (fdfw) drives `ens`, which recurs through a
    feedback ensemble (fdbk). The gate projects onto the neurons of both
    with weight column ``wInh`` (negative, since kInh < 0): fdfw receives
    ``x`` and fdbk receives ``1 - x``, so the two paths are suppressed in
    complementary gate states.

    Parameters
    ----------
    f1, f2 : synapses for the feedforward/recurrent and feedback/probe paths.
    stim, gating : callables of t producing the input and gate signals.
    N : neurons per ensemble.
    t, dt : simulation length and timestep (s).
    m, i : max-rate and intercept distributions for fdbk and ens.
    kInh : gate weight (negative = inhibitory).
    seed : shared by the network, ensembles, connections, and simulator.

    Returns
    -------
    dict with 'times' plus filtered probe data for inpt/gate/fdfw/fdbk/ens.

    NOTE(review): the default synapse/distribution arguments are module-level
    shared objects; assumed not mutated by nengo — confirm.
    """
    wInh = kInh * np.ones((N, 1))  # one inhibitory weight per neuron
    with nengo.Network(seed=seed) as model:
        inpt = nengo.Node(stim)
        gate = nengo.Node(gating)
        fdfw = nengo.Ensemble(N, 1, seed=seed)
        fdbk = nengo.Ensemble(N,
                              1,
                              max_rates=m,
                              intercepts=i,
                              neuron_type=nengo.LIF(),
                              seed=seed)
        ens = nengo.Ensemble(N,
                             1,
                             max_rates=m,
                             intercepts=i,
                             neuron_type=nengo.LIF(),
                             seed=seed)
        nengo.Connection(inpt, fdfw, synapse=None)
        nengo.Connection(fdfw, ens, synapse=f1)
        nengo.Connection(ens, fdbk, synapse=f1)
        nengo.Connection(fdbk, ens, synapse=f2)
        # Gate inhibits fdfw in proportion to x ...
        nengo.Connection(gate,
                         fdfw.neurons,
                         transform=wInh,
                         function=lambda x: x)
        # ... and fdbk in proportion to (1 - x), the complement.
        nengo.Connection(gate,
                         fdbk.neurons,
                         transform=wInh,
                         function=lambda x: 1 - x)
        pInpt = nengo.Probe(inpt, synapse=f2)
        pGate = nengo.Probe(gate, synapse=None)
        pFdfw = nengo.Probe(fdfw, synapse=f2)
        pFdbk = nengo.Probe(fdbk, synapse=f2)
        pEns = nengo.Probe(ens, synapse=f2)
    with nengo.Simulator(model, seed=seed) as sim:
        sim.run(t)
    return dict(times=sim.trange(),
                inpt=sim.data[pInpt],
                gate=sim.data[pGate],
                fdfw=sim.data[pFdfw],
                fdbk=sim.data[pFdbk],
                ens=sim.data[pEns])
def easy(t=10, dt=0.001, f1=Lowpass(1e-2), f2=Lowpass(1e-1)):
    """Drive goTarget with a synthetic signal and save the resulting plot."""
    stim = makeSignal(t=t, dt=dt, f=f2)

    def gating(t):
        # Gate is low (0) only during the first and ninth seconds.
        return 0 if (0 < t < 1 or 8 < t < 9) else 1

    data = goTarget(t=t, f1=f1, f2=f2, stim=stim, gating=gating)

    fig, ax = plt.subplots()
    times = data['times']
    ax.plot(times, data['inpt'], linestyle="--", label='inpt')
    ax.plot(times, data['gate'], linestyle="--", label='gate')
    ax.plot(times, data['fdfw'], alpha=0.5, label='fdfw')
    ax.plot(times, data['ens'], alpha=0.5, label='ens')
    ax.legend(loc="upper right")
    ax.set(xlabel="time (s)", ylabel=r"$\mathbf{\hat{x}}(t)$")
    fig.savefig("plots/gatedMemory_goTarget.pdf")
Exemple #6
0
def test_sim_new_synapse(Simulator):
    """A synapse built by subtracting two Lowpass filters can be simulated."""
    synapse = Lowpass(0.1) - Lowpass(0.01)
    with Network() as model:
        stim = nengo.Node(output=np.sin)
        passthrough = nengo.Node(size_in=1)
        nengo.Connection(stim, passthrough, synapse=synapse)
        p_stim = nengo.Probe(stim, synapse=None)
        p_out = nengo.Probe(passthrough, synapse=None)

    with Simulator(model) as sim:
        sim.run(0.1)

    # The simulated node should match offline filtering of the raw stimulus.
    expected = shift(synapse.filt(sim.data[p_stim]))
    assert np.allclose(expected, sim.data[p_out])
Exemple #7
0
def test_minreal():
    """pole_zero_cancel removes matched pole/zero pairs and nothing else."""
    lp_slow = Lowpass(0.05)
    lp_fast = Lowpass(0.01)
    lp_fast_sq = lp_fast * lp_fast
    gain2 = LinearSystem(2)

    # Systems with nothing to cancel come back unchanged.
    assert pole_zero_cancel(lp_slow) == lp_slow
    assert pole_zero_cancel(lp_fast_sq) == lp_fast_sq

    # Matched pairs cancel (tol loosened for numpy 1.9.2).
    assert pole_zero_cancel(lp_fast_sq / lp_fast, tol=1e-4) == lp_fast
    assert (pole_zero_cancel(lp_slow * lp_fast / lp_fast_sq,
                             tol=1e-4) == lp_slow / lp_fast)
    assert (pole_zero_cancel(lp_fast / lp_fast_sq, tol=1e-4) ==
            pole_zero_cancel(~lp_fast))
    assert (pole_zero_cancel(lp_fast_sq * gain2) ==
            pole_zero_cancel(gain2) * pole_zero_cancel(lp_fast_sq))
Exemple #8
0
def make_normed_flipped(value=1.0,
                        t=1.0,
                        dt=0.001,
                        N=1,
                        f=Lowpass(0.01),
                        seed=0):
    """Build a stimulus function of N concatenated white-noise segments.

    Each segment is simulated for ``t`` seconds, sign-flipped halfway by the
    ``flip`` node, and scaled so its ``f``-filtered version peaks at
    ``value``. Returns a callable of t that indexes the precomputed array
    (no bounds checking; assumes calls with t < N*t).

    NOTE(review): the destination slice has int(t/dt) rows while the source
    holds the samples produced by sim.run(t) — assumed to match; confirm
    against the simulator's sample count.
    """
    print(
        'Creating input signal from concatenated, normalized, flipped white noise'
    )
    stim_length = int(t * N / dt) + 1
    u_list = np.zeros((stim_length, 1))
    for n in range(N):
        stim_func = nengo.processes.WhiteSignal(period=t / 2,
                                                high=1,
                                                rms=0.5,
                                                seed=seed + n)
        with nengo.Network() as model:
            model.t_half = t / 2

            def flip(t, x):
                # Negate the signal during the second half of the segment.
                return x if t < model.t_half else -1.0 * x

            u_raw = nengo.Node(stim_func)
            u = nengo.Node(output=flip, size_in=1)
            nengo.Connection(u_raw, u, synapse=None)
            p_u = nengo.Probe(u, synapse=None)
        with nengo.Simulator(model, progress_bar=False, dt=dt) as sim:
            sim.run(t, progress_bar=False)
        # Normalize by the peak of the *filtered* signal, but store the raw one.
        u = f.filt(sim.data[p_u], dt=dt)
        norm = value / np.max(np.abs(u))
        u_list[n * int(t / dt):(n + 1) * int(t / dt)] = sim.data[p_u] * norm
    stim_func = lambda t: u_list[int(t / dt)]
    return stim_func
Exemple #9
0
def test_l1_norm_known():
    """l1_norm matches analytically known values."""
    # A Lowpass has an L1 norm of exactly 1, reported with zero rtol.
    l1, rtol = l1_norm(Lowpass(0.1))
    assert np.allclose(l1, 1)
    assert np.allclose(rtol, 0)

    # A passthrough term adds its magnitude to the norm.
    for offset in (5, -5):
        assert np.allclose(l1_norm(Lowpass(0.1) + offset)[0], 6)

    # Scaling Alpha by a scales the norm to approximately |a|,
    # within the requested tolerance.
    for gain in (-2, 3):
        for desired_rtol in (1e-1, 1e-2, 1e-4, 1e-8):
            l1, rtol = l1_norm(gain * Alpha(0.1), rtol=desired_rtol)
            assert np.allclose(l1, abs(gain), rtol=rtol)
            assert rtol <= desired_rtol
Exemple #10
0
def make_signal(value=1.0,
                t=10.0,
                t_flat=3,
                dt=0.001,
                f=Lowpass(0.01),
                normed='x',
                seed=0,
                test=False):
    """Build a stitched stimulus from filtered white noise.

    Simulates a WhiteSignal for t/2 seconds, scales it so that either the
    ``f``-filtered input (``normed='u'``) or its integral (``normed='x'``)
    peaks at ``value``, then concatenates sign-flipped halves with flat
    (zero) segments of length ``t_flat`` in between.

    Parameters
    ----------
    value : float — target peak magnitude after normalization.
    t : float — nominal duration (s); only t/2 is simulated.
    t_flat : float — duration (s) of each flat segment.
    dt : float — simulation timestep.
    f : synapse — filter applied before measuring the normalization peak.
    normed : {'u', 'x'} — normalize by the filtered input or its integral.
    seed : int — seed for the white-noise process.
    test : bool — use the alternative (no leading flat) segment layout.

    Returns
    -------
    np.ndarray — 1-D concatenated stimulus.

    Raises
    ------
    ValueError — if ``normed`` is not 'u' or 'x'.
    """
    stim_func = nengo.processes.WhiteSignal(period=t / 2,
                                            high=1.0,
                                            rms=0.5,
                                            seed=seed)
    with nengo.Network() as model:
        u = nengo.Node(stim_func)
        p_u = nengo.Probe(u, synapse=None)
        p_x = nengo.Probe(u, synapse=1 / s)  # 1/s integrates the signal
    with nengo.Simulator(model, progress_bar=False, dt=dt) as sim:
        sim.run(t / 2, progress_bar=False)
    u = f.filt(sim.data[p_u], dt=dt)
    x = f.filt(sim.data[p_x], dt=dt)
    if normed == 'u':
        norm = value / np.max(np.abs(u))
    elif normed == 'x':
        norm = value / np.max(np.abs(x))
    else:
        # Previously an unknown mode crashed later with a NameError on
        # `norm`; fail fast with a clear message instead.
        raise ValueError("normed must be 'u' or 'x', got %r" % (normed,))
    vals1 = sim.data[p_u][:int(t / 4 / dt), 0] * norm
    vals2 = sim.data[p_u][int(t / 4 / dt):, 0] * norm
    flat = np.zeros((int(t_flat / dt)))
    if test:
        result = np.concatenate((vals1, vals2, -vals1, -vals2, vals1, flat))
    else:
        result = np.concatenate(
            (flat, vals1, flat, vals2, -vals1, flat, -vals2))
    return result
Exemple #11
0
def test_invalid_realize():
    """_realize rejects malformed radii arguments."""
    sys = Lowpass(0.1)
    identity = np.eye(len(sys))

    # Neither a nested list nor a wrong-length vector is accepted.
    for bad_radii in ([[1]], [1, 2]):
        with pytest.raises(ValueError):
            _realize(sys, radii=bad_radii, T=identity)
Exemple #12
0
def test_mapping(Simulator, plt, seed):
    """ss2sim mappings reproduce Alpha(0.1) through various synapses.

    Builds six node-based state-space implementations — continuous and
    discrete mappings through a normal lowpass, a scaled lowpass, and a
    scaled integrator — and checks each against the ideal filtered
    stimulus. The discrete mappings should be essentially exact; the
    continuous ones are allowed a small tolerance.
    """
    sys = Alpha(0.1)
    syn = Lowpass(0.01)
    gsyn = 2*syn  # scaled lowpass
    isyn = 2/s  # scaled integrator
    dt = 0.001

    ss = ss2sim(sys, syn, None)  # normal lowpass, continuous
    dss = ss2sim(sys, syn, dt)  # normal lowpass, discrete
    gss = ss2sim(sys, gsyn, None)  # scaled lowpass, continuous
    gdss = ss2sim(sys, gsyn, dt)  # scaled lowpass, discrete
    iss = ss2sim(sys, isyn, None)  # scaled integrator, continuous
    idss = ss2sim(sys, isyn, dt)  # scaled integrator, discrete
    assert ss.analog and gss.analog and iss.analog
    assert not (dss.analog or gdss.analog or idss.analog)

    with Network(seed=seed) as model:
        stim = nengo.Node(output=lambda t: np.sin(20*np.pi*t))

        probes = []
        # One (x, y) node pair per mapped system, each wired with the
        # synapse the mapping was computed for.
        for mapped, synapse in ((ss, syn), (dss, syn), (gss, gsyn),
                                (gdss, gsyn), (iss, isyn), (idss, isyn)):
            A, B, C, D = mapped.ss
            x = nengo.Node(size_in=2)
            y = nengo.Node(size_in=1)

            nengo.Connection(stim, x, transform=B, synapse=synapse)
            nengo.Connection(x, x, transform=A, synapse=synapse)
            nengo.Connection(x, y, transform=C, synapse=None)
            nengo.Connection(stim, y, transform=D, synapse=None)

            probes.append(nengo.Probe(y))

        p_stim = nengo.Probe(stim)

    pss, pdss, pgss, pgdss, piss, pidss = probes

    with Simulator(model, dt=dt) as sim:
        sim.run(1.0)

    # shift(...) compensates for a one-step simulator offset — TODO confirm.
    expected = shift(sys.filt(sim.data[p_stim], dt))

    plt.plot(sim.trange(), sim.data[pss], label="Continuous", alpha=0.5)
    plt.plot(sim.trange(), sim.data[pdss], label="Discrete", alpha=0.5)
    plt.plot(sim.trange(), sim.data[pgss], label="Gain Cont.", alpha=0.5)
    plt.plot(sim.trange(), sim.data[pgdss], label="Gain Disc.", alpha=0.5)
    plt.plot(sim.trange(), sim.data[piss], label="Integ Cont.", alpha=0.5)
    plt.plot(sim.trange(), sim.data[pidss], label="Integ Disc.", alpha=0.5)
    plt.plot(sim.trange(), expected, label="Expected", linestyle='--')
    plt.legend()

    # Discrete mappings are exact; continuous ones get a small tolerance.
    assert np.allclose(sim.data[pss], expected, atol=0.01)
    assert np.allclose(sim.data[pdss], expected)
    assert np.allclose(sim.data[pgss], expected, atol=0.01)
    assert np.allclose(sim.data[pgdss], expected)
    assert np.allclose(sim.data[piss], expected, atol=0.01)
    assert np.allclose(sim.data[pidss], expected)
Exemple #13
0
def test_is_stable():
    """is_stable behaves correctly for analog and digital systems."""
    assert Lowpass(0.1).is_stable
    assert not (~s).is_stable  # integrator
    assert LinearSystem(1).is_stable  # static gain

    # Digital: stable iff poles lie strictly inside the unit circle.
    assert (~(z * (z - 0.5))).is_stable
    assert not (z / (z - 1)).is_stable  # discrete integrator
Exemple #14
0
    def do_trial(name, seed, length=2000, dt=0.001, tau_probe=0.02,
                 sanity=False, **kwargs):
        """Run one simulation trial and analyze the decoded delay window.

        Drives the prebuilt model with fresh white noise, then reconstructs
        ``x_hat`` offline from the raw probe data via discrete Principle 3,
        and forwards everything to ``analyze``. If ``sanity`` is set, also
        analyzes the ideal (noise-free) trajectory first.
        """
        # Note: depends on the globals, (factory, C, model, u, p_u, P, sys)

        process = nengo.processes.WhiteSignal(
            period=length*dt, rms=power, high=freq, y0=0, seed=seed)

        test_u = process.run_steps(length, dt=dt)
        x_ideal = sys.X.filt(test_u, dt=dt)

        if sanity:
            # Sanity pass: run the analysis on the ideal state itself.
            analyze("ideal-%s" % name,
                    t=process.ntrange(length, dt=dt),
                    u=test_u,
                    x_hat=x_ideal,
                    x_ideal=x_ideal,
                    C=C,
                    theta=theta,
                    dump_file=False,
                    do_plot=False)

        u.output = process

        with factory(network=model, dt=dt) as sim:
            sim.run(length*dt)
            if post_fixture:
                post_fixture(sim)

        # The node must have reproduced the process output exactly.
        assert np.allclose(test_u, np.asarray(sim.data[p_u]))

        # Use discrete principle 3, offline, to get x_hat
        # from the unfiltered spikes representing x.
        # This is analagous to probing the PSC, pre-encoding.
        syn_probe = Lowpass(tau_probe)
        map_out = ss2sim(sys, synapse=syn_probe, dt=dt)
        x_raw = np.asarray([sim.data[p] for p in P]).squeeze()
        f = map_out.A.dot(x_raw) + map_out.B.dot(test_u.T)
        x_hat = syn_probe.filt(f, axis=1, dt=dt).T

        return analyze(
            name=name, t=sim.trange(), u=test_u,
            x_hat=x_hat, x_ideal=x_ideal, C=C,
            theta=theta, **kwargs)
Exemple #15
0
def make_signal(t=10.0, dt=0.001, f=Lowpass(0.01), seed=0):
    """Generate a normalized, filtered, mirrored white-noise stimulus."""
    process = nengo.processes.WhiteSignal(period=t, high=1.0, rms=0.5,
                                          seed=seed)
    with nengo.Network() as model:
        node = nengo.Node(process)
        probe = nengo.Probe(node, synapse=None)
    with nengo.Simulator(model, progress_bar=False, dt=dt) as sim:
        sim.run(t, progress_bar=False)

    # Filter, then scale so the peak magnitude is 1.
    filtered = f.filt(sim.data[probe], dt=dt)
    norm = 1.0 / np.max(np.abs(filtered))
    scaled = np.ravel(filtered) * norm

    # Mirror with a sign flip and keep every other sample.
    return np.concatenate(([0], scaled, -scaled))[::2]
Exemple #16
0
def test_grams():
    """Gramians satisfy their Lyapunov equations and are full rank."""
    sys = 0.6 * Alpha(0.01) + 0.4 * Lowpass(0.05)
    A, B, C, D = sys2ss(sys)

    # Controllability: A P + P A^T + B B^T = 0 and P is nonsingular.
    P = control_gram(sys)
    assert np.allclose(A.dot(P) + P.dot(A.T), -B.dot(B.T))
    assert matrix_rank(P) == len(P)

    # Observability: A^T Q + Q A + C^T C = 0 and Q is nonsingular.
    Q = observe_gram(sys)
    assert np.allclose(A.T.dot(Q) + Q.dot(A), -C.T.dot(C))
    assert matrix_rank(Q) == len(Q)
Exemple #17
0
def test_principle3_continuous():
    """Principle 3 through a continuous Lowpass maps (A, B) -> (tau*A + I, tau*B)."""
    tau = 0.01
    sys = PadeDelay(0.1, order=5)

    mapped = ss2sim(sys, Lowpass(tau), dt=None)

    identity = np.eye(len(sys))
    assert np.allclose(mapped.A, tau * sys.A + identity)
    assert np.allclose(mapped.B, tau * sys.B)
    # The readout matrices are untouched by the mapping.
    assert np.allclose(mapped.C, sys.C)
    assert np.allclose(mapped.D, sys.D)
Exemple #18
0
def test_unsupported_mapping():
    """ss2sim rejects unsupported synapse types and analog/digital mixes."""
    lpf = Lowpass(0.1)

    cases = (
        (lpf, Highpass(0.1), None),  # unsupported synapse type
        (~z, lpf, None),             # digital system, analog synapse
        (lpf, ~z, None),             # analog system, digital synapse
        (~z, ~z, 1.),                # digital/digital with a dt supplied
    )
    for bad_sys, bad_syn, bad_dt in cases:
        with pytest.raises(ValueError):
            ss2sim(sys=bad_sys, synapse=bad_syn, dt=bad_dt)
Exemple #19
0
def test_impulse():
    """impulse() agrees with filtering a delta, for analog and digital systems."""
    dt = 0.001
    tau = 0.005
    length = 500

    # Discrete-time delta of unit area (height 1/dt).
    delta = np.zeros(length)
    delta[0] = 1. / dt

    sys = Lowpass(tau)
    imp = sys.impulse(length, dt)
    assert not np.allclose(imp[0], 0)

    # Identical to filtering the delta explicitly.
    assert np.allclose(imp, sys.filt(delta, dt))

    # The dt used above becomes the system's default dt.
    assert sys.default_dt == dt
    assert np.allclose(imp, sys.impulse(length))

    # A discretized system gives the same response up to the 1/dt scaling.
    dss = cont2discrete(sys, dt=dt)
    assert not dss.analog
    assert np.allclose(imp, dss.impulse(length) / dt)
    assert np.allclose(imp, dss.impulse(length, dt=dt))
Exemple #20
0
def test_principle3_discrete():
    """Principle 3 through a Lowpass discretized with timestep dt."""
    tau = 0.01
    dt = 0.002
    analog = PadeDelay(0.1, order=5)
    syn = Lowpass(tau)

    FH = ss2sim(analog, syn, dt=dt)

    decay = np.exp(-dt / tau)  # lowpass decay per step
    digital = cont2discrete(analog, dt=dt)
    eye = np.eye(len(digital))
    assert np.allclose(FH.A, (digital.A - decay * eye) / (1 - decay))
    assert np.allclose(FH.B, digital.B / (1 - decay))
    assert np.allclose(FH.C, digital.C)
    assert np.allclose(FH.D, digital.D)

    # Pre-discretizing the synapse ourselves and passing dt=None is identical.
    assert ss_equal(ss2sim(digital, cont2discrete(syn, dt=dt), dt=None), FH)
Exemple #21
0
def test_balred(rng):
    """Balanced reduction degrades gracefully as the order shrinks."""
    dt = 0.001
    sys = Alpha(0.01) + Lowpass(0.001)

    u = rng.normal(size=2000)
    expected = sys.filt(u, dt)

    def check(order, within, tol, method='del'):
        # Reduce to at most `order` states and compare filtered responses.
        redsys = balred(sys, order, method=method)
        assert redsys.order_den <= order
        assert abs(rmse(expected, redsys.filt(u, dt)) - within) < tol

    # Orders >= the true order warn (nothing to remove) but are lossless.
    with warns(UserWarning):
        check(4, 0, 1e-13)
    with warns(UserWarning):
        check(3, 0, 1e-13)
    check(2, 0.03, 0.01)
    check(1, 0.3, 0.1)
Exemple #22
0
def make_normed_flipped(value=1.0, t=1.0, dt=0.001, N=1, f=Lowpass(0.01), seed=0):
    """Build two stimulus functions whose product, filtered by f, peaks at value.

    For each of N segments, two independent white-noise signals are simulated
    for t/2 seconds and each scaled by sqrt(norm), so their product is scaled
    by norm. The first signal is mirrored with a sign flip (u1, -u1) while
    the second is mirrored unchanged (u2, u2), making the product's second
    half the negation of its first.

    Returns a pair of callables of t that index the precomputed arrays
    (no bounds checking; assumes calls with t < N*t).
    """
    u1_list = np.zeros((int(t*N/dt)+1, 1))
    u2_list = np.zeros((int(t*N/dt)+1, 1))
    for n in range(N):
        stim_func1 = nengo.processes.WhiteSignal(period=t/2, high=1, rms=0.6, seed=seed+n)
        stim_func2 = nengo.processes.WhiteSignal(period=t/2, high=1, rms=0.6, seed=seed+100+n)
        with nengo.Network() as model:
            u1 = nengo.Node(stim_func1)
            u2 = nengo.Node(stim_func2)
            p1 = nengo.Probe(u1, synapse=None)
            p2 = nengo.Probe(u2, synapse=None)
        with nengo.Simulator(model, progress_bar=False, dt=dt) as sim:
            sim.run(t/2, progress_bar=False)
        # Normalize by the peak of the filtered *product* of the two signals.
        x = f.filt(sim.data[p1] * sim.data[p2], dt=dt)
        norm = value / np.max(np.abs(x))
        u1n = sim.data[p1]*np.sqrt(norm)
        u2n = sim.data[p2]*np.sqrt(norm)
        u1_list[n*int(t/dt): (n+1)*int(t/dt)] = np.vstack((u1n, -u1n))
        u2_list[n*int(t/dt): (n+1)*int(t/dt)] = np.vstack((u2n, u2n))
    stim_func1 = lambda t: u1_list[int(t/dt)]
    stim_func2 = lambda t: u2_list[int(t/dt)]
    return stim_func1, stim_func2
Exemple #23
0
def dh_lstsq(stim_data, target_data, spk_data,
        lambda_c=1e-1, lambda_d=1e-1, order=1, n_samples=10000,
        min_d=-1e-2, max_d=1e-2, dt=0.001, h_tar=Lowpass(0.1),
        mean_taus=[1e-1, 1e-2], std_taus=[1e-2, 1e-3], max_tau=1e0, lstsq_iter=100):

    """Jointly fit synapse time constants (c) and decoders (d) via
    Tikhonov-regularized least squares, retrying with a broadened prior
    until all fitted time constants are positive.

    Courtesy of Aaron Voelker.

    Only order 1 (Lowpass) and order 2 (DoubleExp) synapses are built at
    the end; other orders would leave ``h_new`` undefined.
    """
    mean_taus = np.array(mean_taus)[:order]
    std_taus = np.array(std_taus)[:order]

    def sample_prior(n_samples, order, mean_taus, std_taus, min_tau=1e-5, rng=np.random.RandomState(seed=0)):
        """Return n samples (taus) from the prior of a k'th-order synapse."""
        taus = np.zeros((n_samples, order))
        for o in range(order):
            taus[:, o] = rng.normal(mean_taus[o], std_taus[o], size=(n_samples, )).clip(min_tau)
        return taus

    for att in range(lstsq_iter):  # attempts
        assert len(mean_taus) == order
        assert len(std_taus) == order
        taus = sample_prior(n_samples, order, mean_taus, std_taus)

        poles = -1. / taus
        n_steps = spk_data.shape[0]
        n_neurons = spk_data.shape[1]
        assert poles.shape == (n_samples, order)

        # Convert each sampled pole set into normalized denominator
        # coefficients of a unit-DC-gain transfer function.
        tf_params = np.zeros((n_samples, order))
        for i in range(n_samples):
            sys = LinearSystem(([], poles[i, :], 1 / np.prod(taus[i, :])))   # (zeros, poles, gain)
            assert len(sys) == order
            assert np.allclose(sys.dcgain, 1)
            den_normalized = np.asarray(sys.den / sys.num[0])
            assert len(den_normalized) == order + 1
            assert np.allclose(den_normalized[-1], 1)  # since normalized
            # tf_params ordered from lowest to highest, ignoring c_0 = 1, i.e., [c_1, ..., c_k]
            tf_params[i, :] = den_normalized[:-1][::-1]

        # We assume c_i are independent by setting the off-diagonals to zero
        C = np.cov(tf_params, rowvar=False)
        if order == 1:
            C = C*np.eye(1)
        Q = np.abs(np.linalg.inv(C))
        c0 = np.mean(tf_params, axis=0)
        d0 = np.ones((n_neurons, ))
        cd0 = np.hstack((c0, d0))
        assert Q.shape == (order, order)
        assert cd0.shape == (order+n_neurons,)

        # Build the design matrix: successive discrete derivatives of the
        # target for the c columns, spike trains for the d columns.
        diff = (1. - ~z) / dt
        A = np.zeros((n_steps, order + n_neurons))
        deriv_n = target_data
        for i in range(order):
            deriv_n = diff.filt(deriv_n, dt=dt)
            A[:, i] = deriv_n.ravel()  # todo: D>1
        for n in range(n_neurons):
            A[:, order+n] = spk_data[:, n]
        b = h_tar.tau  # set on pre_u ==> supv connection in network
        Y = (b*stim_data - target_data)
        A = h_tar.filt(A, dt=dt, axis=0)
        Y = h_tar.filt(Y, dt=dt)

        # construct block diagonal matrix with different regularizations for filter coefficients and decoders
        L = block_diag(lambda_c*Q, lambda_d*np.eye(n_neurons))
        gamma = A.T.dot(A) + L
        upsilon = A.T.dot(Y) + L.dot(cd0).reshape((order+n_neurons, 1))  # optional term with tikhonov regularization

        cd = np.linalg.inv(gamma).dot(upsilon).ravel()
        c_new = cd[:order]
        d_new = -1.*cd[-n_neurons:]
        assert c_new.shape==(order,)
        assert d_new.shape==(n_neurons,)
        print('taus attempt %s, nonzero d %s, tau=%s: '%(att, np.count_nonzero(d_new+1), c_new))
        # Zero out decoders that fall outside the allowed [min_d, max_d] band.
        for n in range(n_neurons):
            if d_new[n] > max_d or d_new[n] < min_d:
                d_new[n] = 0
        d_new = d_new.reshape((n_neurons, 1))
        if order == 1:
            h_new = Lowpass(c_new[0])
        elif order == 2:
            h_new = DoubleExp(c_new[0], c_new[1])
        assert np.allclose(h_new.dcgain, 1)
        if np.all(c_new > 0):
            break
        else:
            # Negative tau fit: broaden the prior and the regularization,
            # then retry.
            mean_taus[np.argmin(mean_taus)] *= 1.25
            lambda_c *= 1.25
            lambda_d *= 1.25

    return d_new, h_new
Exemple #24
0
def go(d_ens,
       f_ens,
       n_pre=100,
       n_neurons=30,
       t=10,
       m=Uniform(30, 40),
       i=Uniform(-1, 0.6),
       seed=1,
       dt=0.001,
       f=Lowpass(0.01),
       f_smooth=DoubleExp(1e-2, 2e-1),
       neuron_type=LIF(),
       w_ens=None,
       e_ens=None,
       w_ens2=None,
       e_ens2=None,
       L=False,
       L2=False,
       stim_func=lambda t: np.sin(t)):
    """Simulate a two-stage network (pre -> ens -> ens2) with direct-mode
    references (x, x2) and LIF supervisor ensembles (supv, supv2).

    ``d_ens``/``f_ens`` are the decoders and synapse for the ens -> ens2
    connection. When ``L``/``L2`` are set and ``neuron_type`` is a
    DurstewitzNeuron, LearningNode2 nodes are attached to adapt the
    corresponding connection; previously learned weights (``w_ens*``) and
    learning state (``e_ens*``) can be passed in to resume.

    Returns a dict of probe data (spikes, voltages, reference states) plus
    the possibly-updated weights/learning state.

    NOTE(review): the weight-loading loops below poke conn.weights /
    conn.netcons / neuron.h, which assumes a NEURON-backed connection built
    by this project's simulator — confirm before reuse. The loop variable
    ``pre`` shadows the ``pre`` ensemble.
    """
    with nengo.Network(seed=seed) as model:

        # Stimulus and Nodes
        u = nengo.Node(stim_func)

        # Ensembles
        pre = nengo.Ensemble(n_pre, 1, radius=1.5, seed=seed)
        ens = nengo.Ensemble(n_neurons,
                             1,
                             max_rates=m,
                             intercepts=i,
                             neuron_type=neuron_type,
                             seed=seed)
        ens2 = nengo.Ensemble(n_neurons,
                              1,
                              max_rates=m,
                              intercepts=i,
                              neuron_type=neuron_type,
                              seed=seed + 1)
        supv = nengo.Ensemble(n_neurons,
                              1,
                              max_rates=m,
                              intercepts=i,
                              neuron_type=LIF(),
                              seed=seed)
        supv2 = nengo.Ensemble(n_neurons,
                               1,
                               max_rates=m,
                               intercepts=i,
                               neuron_type=LIF(),
                               seed=seed + 1)
        x = nengo.Ensemble(1, 1, neuron_type=nengo.Direct())
        x2 = nengo.Ensemble(1, 1, neuron_type=nengo.Direct())

        # Connections
        nengo.Connection(u, pre, synapse=None, seed=seed)
        conn = nengo.Connection(pre, ens, synapse=f, seed=seed)
        conn2 = nengo.Connection(ens,
                                 ens2,
                                 synapse=f_ens,
                                 seed=seed + 1,
                                 solver=NoSolver(d_ens))
        nengo.Connection(x, supv, synapse=None, seed=seed)
        nengo.Connection(x2, supv2, synapse=None, seed=seed + 1)
        nengo.Connection(u, x, synapse=f, seed=seed)
        nengo.Connection(x, x2, synapse=f, seed=seed + 1)

        # Probes
        p_u = nengo.Probe(u, synapse=None)
        p_ens = nengo.Probe(ens.neurons, synapse=None)
        p_v = nengo.Probe(ens.neurons, 'voltage', synapse=None)
        p_x = nengo.Probe(x, synapse=None)
        p_ens2 = nengo.Probe(ens2.neurons, synapse=None)
        p_v2 = nengo.Probe(ens2.neurons, 'voltage', synapse=None)
        p_x2 = nengo.Probe(x2, synapse=None)
        p_supv = nengo.Probe(supv.neurons, synapse=None)
        p_supv2 = nengo.Probe(supv2.neurons, synapse=None)

        # Bioneurons
        if L and isinstance(neuron_type, DurstewitzNeuron):
            # Learning node for the pre -> ens connection.
            node = LearningNode2(n_neurons, n_pre, conn, k=3e-6)
            nengo.Connection(pre.neurons, node[0:n_pre], synapse=f)
            nengo.Connection(ens.neurons,
                             node[n_pre:n_pre + n_neurons],
                             synapse=f_smooth)
            nengo.Connection(supv.neurons,
                             node[n_pre + n_neurons:n_pre + 2 * n_neurons],
                             synapse=f_smooth)
        if L2 and isinstance(neuron_type, DurstewitzNeuron):
            # Learning node for the ens -> ens2 connection.
            node2 = LearningNode2(n_neurons, n_neurons, conn2, k=3e-6)
            nengo.Connection(ens.neurons, node2[0:n_neurons], synapse=f_ens)
            nengo.Connection(ens2.neurons,
                             node2[n_neurons:2 * n_neurons],
                             synapse=f_smooth)
            nengo.Connection(supv2.neurons,
                             node2[2 * n_neurons:3 * n_neurons],
                             synapse=f_smooth)

    with nengo.Simulator(model, seed=seed, dt=dt) as sim:
        # Restore previously-learned weights into the NEURON net connections;
        # the reversal potential is excitatory (0) or inhibitory (-70)
        # depending on the weight's sign.
        if np.any(w_ens):
            for pre in range(n_pre):
                for post in range(n_neurons):
                    conn.weights[pre, post] = w_ens[pre, post]
                    conn.netcons[pre, post].weight[0] = np.abs(w_ens[pre,
                                                                     post])
                    conn.netcons[pre,
                                 post].syn().e = 0 if w_ens[pre,
                                                            post] > 0 else -70
        if np.any(w_ens2):
            for pre in range(n_neurons):
                for post in range(n_neurons):
                    conn2.weights[pre, post] = w_ens2[pre, post]
                    conn2.netcons[pre, post].weight[0] = np.abs(w_ens2[pre,
                                                                       post])
                    conn2.netcons[
                        pre,
                        post].syn().e = 0 if w_ens2[pre, post] > 0 else -70
        if np.any(e_ens):
            conn.e = e_ens
        if np.any(e_ens2):
            conn2.e = e_ens2
        neuron.h.init()
        sim.run(t)
        reset_neuron(sim, model)

    # Carry forward adapted weights/learning state if training was enabled.
    if L and hasattr(conn, 'weights'):
        w_ens = conn.weights
        e_ens = conn.e
    if L2 and hasattr(conn2, 'weights'):
        w_ens2 = conn2.weights
        e_ens2 = conn2.e

    return dict(
        times=sim.trange(),
        u=sim.data[p_u],
        ens=sim.data[p_ens],
        v=sim.data[p_v],
        x=sim.data[p_x],
        ens2=sim.data[p_ens2],
        v2=sim.data[p_v2],
        x2=sim.data[p_x2],
        supv=sim.data[p_supv],
        supv2=sim.data[p_supv2],
        w_ens=w_ens,
        e_ens=e_ens,
        w_ens2=w_ens2,
        e_ens2=e_ens2,
    )
Exemple #25
0
    with Simulator(model, dt=dt) as sim:
        sim.run(T)

    # lower bound includes both approximation error and the gap between
    # random {-1, 1} flip-flop inputs and the true worst-case input
    worst_x = np.max(abs(sim.data[p]), axis=0)
    assert (lower <= worst_x + eps).all()
    assert (worst_x <= 1 + eps).all()


def test_l1_repr():
    """repr(L1Norm) round-trips its keyword arguments."""
    norm = L1Norm(rtol=.1, max_length=10)
    assert repr(norm) == "L1Norm(rtol=0.1, max_length=10)"


@pytest.mark.parametrize("sys,lower", [(Lowpass(0.005), 1.0),
                                       (Alpha(0.01), 0.3),
                                       (Bandpass(50, 5), 0.05),
                                       (Highpass(0.01, 4), 0.1),
                                       (PadeDelay(0.1, 2, 2), 0.3)])
def test_hankel_normalization(Simulator, rng, sys, lower):
    """Hankel-normalized realizations stay within the expected bounds."""
    _test_normalization(
        Simulator, sys, rng, Hankel(), l1_lower=0.5, lower=lower)


@pytest.mark.parametrize("radius", [0.5, 5, 10])
@pytest.mark.parametrize("sys", [Lowpass(0.005)])
Exemple #26
0
    FH = ss2sim(sys, syn, dt=dt)

    a = np.exp(-dt / tau)
    sys = cont2discrete(sys, dt=dt)
    assert np.allclose(FH.A, (sys.A - a * np.eye(len(sys))) / (1 - a))
    assert np.allclose(FH.B, sys.B / (1 - a))
    assert np.allclose(FH.C, sys.C)
    assert np.allclose(FH.D, sys.D)

    # We can also do the discretization ourselves and then pass in dt=None
    assert ss_equal(
        ss2sim(sys, cont2discrete(syn, dt=dt), dt=None), FH)


@pytest.mark.parametrize("sys", [PadeDelay(0.1, order=5), Lowpass(0.1)])
def test_doubleexp_continuous(sys):
    """Principle 3 through a continuous DoubleExp synapse."""
    tau1, tau2 = 0.05, 0.02
    FH = ss2sim(sys, DoubleExp(tau1, tau2), dt=None)

    A, B = sys.A, sys.B
    eye = np.eye(len(A))
    # For a double-exponential synapse:
    #   A' = tau1*tau2*A^2 + (tau1+tau2)*A + I
    #   B' = (tau1*tau2*A + (tau1+tau2)*I) B
    assert np.allclose(FH.A,
                       tau1 * tau2 * A.dot(A) + (tau1 + tau2) * A + eye)
    assert np.allclose(FH.B, (tau1 * tau2 * A + (tau1 + tau2) * eye).dot(B))
    # Readout matrices are untouched.
    assert np.allclose(FH.C, sys.C)
    assert np.allclose(FH.D, sys.D)
Exemple #27
0
def test_invalid_modred():
    """modred rejects unsupported reduction methods."""
    pytest.raises(ValueError, modred, Lowpass(0.1), 0, method='zoh')
# --- Exemple #28 ---
def test_invalid_balred():
    """balred rejects an invalid target order."""
    pytest.raises(ValueError, balred, Lowpass(0.1), 0)
# --- Exemple #29 ---
def go(name, tau, factory, recurrent_solver=Default,
       pre_fixture=None, post_fixture=None):
    """Run a PadeDelay "delay network" experiment and return a bootstrap CI.

    Builds a balanced-realization PadeDelay system, maps it onto a network of
    one-dimensional ensembles via Principle 3 (``ss2sim``), runs 25 trials x
    10 white-signal test cases, pickles the per-trial NRMSE values to
    ``datapath("<name>-delay-network.pkl")``, and returns the bootstrapped
    95% confidence interval of the mean NRMSE.

    Parameters
    ----------
    name : str
        Label used in trial names and the output pickle filename.
    tau : float
        Lowpass synapse time constant used for the hardware mapping and the
        recurrent/input connections.
    factory : callable
        Simulator factory; called as ``factory(network=model, dt=dt)`` and
        used as a context manager (presumably a nengo ``Simulator``-like
        object — confirm against callers).
    recurrent_solver : optional
        Solver passed to the recurrent connections (nengo ``Default``
        by default).
    pre_fixture, post_fixture : callable or None
        Optional hooks: ``pre_fixture(model)`` runs inside the network
        context before construction; ``post_fixture(sim)`` runs after each
        simulation completes.
    """

    set_style()

    theta = 0.1
    order = 3
    freq = 3
    power = 1.0  # chosen to keep radii within [-1, 1]

    # print("PadeDelay(%s, %s) => %f%% error @ %sHz" % (
    #     theta, order, 100*abs(pade_delay_error(theta*freq, order=order)), freq))
    pd = PadeDelay(theta=theta, order=order)

    # Heuristic for normalizing state so that each dimension is ~[-1, +1]
    rz = Balanced()(pd, radii=1./(np.arange(len(pd))+1))
    sys = rz.realization

    # Compute matrix to transform from state (x) -> sampled window (u)
    t_samples = 100
    C = np.asarray([readout(len(pd), r)
                    for r in np.linspace(0, 1, t_samples)]).dot(rz.T)
    assert C.shape == (t_samples, len(sys))


    n_neurons = 128  # per dimension
    map_hw = ss2sim(sys, synapse=Lowpass(tau), dt=None)  # analog mapping
    # Sanity-check the continuous Principle 3 closed form for a lowpass:
    # A' = tau*A + I, B' = tau*B.
    assert np.allclose(map_hw.A, tau*sys.A + np.eye(len(sys)))
    assert np.allclose(map_hw.B, tau*sys.B)

    with nengo.Network() as model:
        if pre_fixture is not None:
            pre_fixture(model)

        u = nengo.Node(output=0, label='u')
        p_u = nengo.Probe(u, synapse=None)

        # This is needed because a single node can't connect to multiple
        # different ensembles. We need a separate node for each ensemble.
        # NOTE: b_i is bound as a default argument to avoid the
        # late-binding-closure pitfall in the comprehension.
        Bu = [nengo.Node(output=lambda _, u, b_i=map_hw.B[i].squeeze(): b_i*u,
                         size_in=1, label='Bu[%d]' % i)
              for i in range(len(sys))]

        # One scalar ensemble per state dimension.
        X = []
        for i in range(len(sys)):
            ens = nengo.Ensemble(
                n_neurons=n_neurons, dimensions=1, label='X[%d]' % i)

            X.append(ens)

        # Wire up input and the fully-connected recurrence (A'[i, j] is
        # likewise bound as a default argument), probing raw activity.
        P = []
        for i in range(len(sys)):
            nengo.Connection(u, Bu[i], synapse=None)
            nengo.Connection(Bu[i], X[i], synapse=tau)
            for j in range(len(sys)):
                nengo.Connection(X[j], X[i], synapse=tau,
                                 function=lambda x_j, a_ij=map_hw.A[i, j]: a_ij*x_j,
                                 solver=recurrent_solver)
            P.append(nengo.Probe(X[i], synapse=None))


    def do_trial(name, seed, length=2000, dt=0.001, tau_probe=0.02,
                 sanity=False, **kwargs):
        # Note: depends on the globals, (factory, C, model, u, p_u, P, sys)

        process = nengo.processes.WhiteSignal(
            period=length*dt, rms=power, high=freq, y0=0, seed=seed)

        test_u = process.run_steps(length, dt=dt)
        x_ideal = sys.X.filt(test_u, dt=dt)

        if sanity:
            # Analyze the ideal (noise-free) trajectory as a baseline check.
            analyze("ideal-%s" % name,
                    t=process.ntrange(length, dt=dt),
                    u=test_u,
                    x_hat=x_ideal,
                    x_ideal=x_ideal,
                    C=C,
                    theta=theta,
                    dump_file=False,
                    do_plot=False)

        u.output = process

        with factory(network=model, dt=dt) as sim:
            sim.run(length*dt)
            if post_fixture:
                post_fixture(sim)

        assert np.allclose(test_u, np.asarray(sim.data[p_u]))

        # Use discrete principle 3, offline, to get x_hat
        # from the unfiltered spikes representing x.
        # This is analogous to probing the PSC, pre-encoding.
        syn_probe = Lowpass(tau_probe)
        map_out = ss2sim(sys, synapse=syn_probe, dt=dt)
        x_raw = np.asarray([sim.data[p] for p in P]).squeeze()
        f = map_out.A.dot(x_raw) + map_out.B.dot(test_u.T)
        x_hat = syn_probe.filt(f, axis=1, dt=dt).T

        return analyze(
            name=name, t=sim.trange(), u=test_u,
            x_hat=x_hat, x_ideal=x_ideal, C=C,
            theta=theta, **kwargs)


    # 25 trials x 10 seeded test cases; collect NRMSE for each run.
    data = defaultdict(list)
    for trial in range(25):
        for seed in range(1, 11):
            data['Trial'].append(trial)
            data['Test Case (#)'].append(seed)
            data['NRMSE'].append(
                do_trial(name="scratch-%s-DN-%d-%d" % (name, trial, seed),
                         seed=seed, dump_file=False))

    df = DataFrame(data)
    df.to_pickle(datapath("%s-delay-network.pkl" % name))

    return bs.bootstrap(np.asarray(df['NRMSE']),
                        stat_func=bs_stats.mean, alpha=1-0.95)  # 95% CI
def test_basic(Simulator, seed, plt):
    """Solve for a standard nengo connection using a feed-forward reservoir."""
    train_t, test_t = 5.0, 0.5
    dt = 0.001
    n_neurons = 100
    tau = 0.01

    sig = WhiteSignal(max(train_t, test_t), high=10, rms=0.3)

    def target(x):
        return x ** 2

    with Network() as model:
        # The ensemble must be seeded so the offline training/validation
        # runs build the same population as the final simulation.
        ens = nengo.Ensemble(n_neurons, 1, seed=seed)
        res = Reservoir(ens, ens.neurons, tau)

        # Solve for a readout approximating a function of the *filtered*
        # stimulus. The lowpass is included because the final RMSE is taken
        # against the lowpass stimulus, consistent with what the NEF does;
        # in a general recurrent reservoir this filter could be anything.
        res.train(lambda x: target(Lowpass(tau).filt(x, dt=dt)),
                  train_t, dt, sig, seed=seed + 1)

        assert (res.size_in, res.size_mid, res.size_out) == (1, n_neurons, 1)

        # Validation run with the same process seed as the simulation below.
        _, (_, _, check_output) = res.run(test_t, dt, sig, seed=seed + 2)

        stim = nengo.Node(output=sig)
        output = nengo.Node(size_in=1)
        nengo.Connection(stim, ens, synapse=None)
        nengo.Connection(ens, output, function=target, synapse=None)

        # The reservoir output already includes a synapse.
        p_res = nengo.Probe(res.output, synapse=None)
        p_normal = nengo.Probe(output, synapse=tau)
        p_stim = nengo.Probe(stim, synapse=tau)

    with Simulator(model, dt=dt, seed=seed + 2) as sim:
        sim.run(test_t)

    # Same process seed for validation and simulation => identical output.
    assert np.allclose(check_output, sim.data[p_res])

    ideal = target(sim.data[p_stim])

    plt.figure()
    plt.plot(sim.trange(), sim.data[p_res], label="Reservoir")
    plt.plot(sim.trange(), sim.data[p_normal], label="Standard")
    plt.plot(sim.trange(), ideal, label="Ideal")
    plt.legend()

    assert np.allclose(rmse(sim.data[p_res], ideal),
                       rmse(sim.data[p_normal], ideal),
                       atol=1e-2)