# Excerpts from the nengolib test suite and its companion `phd` repository.
# Each snippet originally lived in its own file; the imports below are a
# best-guess reconstruction of the shared ones (assumed, not verbatim):
import logging
import time

import numpy as np
import pylab
import seaborn as sns
from matplotlib import gridspec
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.patches import Polygon
from scipy.special import lambertw

import nengo
from nengo.processes import WhiteSignal

import nengolib
from nengolib import Network
from nengolib.networks import LinearNetwork, RollingWindow
from nengolib.signal import nrmse, shift
from nengolib.solvers import Temporal
from nengolib.synapses import Lowpass, DoubleExp, PadeDelay, ss2sim

# Remaining names (ideal_delay, lambert_delay, savefig, HandlerDashedLines,
# sphere, EvalPoints, Encoders, RLS, SimRLS, get_activities, inv, and the
# *_SIM data-file constants) are imported from nengolib or defined elsewhere
# in the repository. In the test functions, `Simulator`, `plt`, `seed`, etc.
# are pytest fixtures.


def test_linear_network(Simulator, plt, seed, rng, neuron_type, atol, atol_x):
    n_neurons = 500
    dt = 0.001
    T = 1.0

    sys = Lowpass(0.1)
    scale_input = 2.0

    synapse = 0.02
    tau_probe = 0.005

    with Network(seed=seed) as model:
        stim = nengo.Node(
            output=nengo.processes.WhiteSignal(T, high=10, seed=seed))
        subnet = LinearNetwork(sys,
                               n_neurons_per_ensemble=n_neurons,
                               synapse=synapse,
                               input_synapse=synapse,
                               dt=dt,
                               neuron_type=neuron_type)
        nengo.Connection(stim,
                         subnet.input,
                         synapse=None,
                         transform=scale_input)

        assert subnet.synapse == subnet.input_synapse
        assert subnet.output_synapse is None

        p_input = nengo.Probe(subnet.input, synapse=tau_probe)
        p_x = nengo.Probe(subnet.state.output, synapse=tau_probe)
        p_output = nengo.Probe(subnet.output, synapse=tau_probe)

    with Simulator(model, dt=dt) as sim:
        sim.run(T)

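    # shift() delays the ideal traces by one timestep to align them with the
    # simulated output (assumed semantics of nengolib.signal.shift).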
    ideal_output = shift(sys.filt(sim.data[p_input]))
    ideal_x = shift(subnet.realization.X.filt(sim.data[p_input]))

    plt.plot(sim.trange(), sim.data[p_input], label="Input", alpha=0.5)
    plt.plot(sim.trange(), sim.data[p_output], label="Actual y", alpha=0.5)
    plt.plot(sim.trange(),
             ideal_output,
             label="Expected y",
             alpha=0.5,
             linestyle='--')
    plt.plot(sim.trange(), sim.data[p_x], label="Actual x", alpha=0.5)
    plt.plot(sim.trange(),
             ideal_x,
             label="Expected x",
             alpha=0.5,
             linestyle='--')
    plt.legend()

    assert nrmse(sim.data[p_output], ideal_output) < atol
    assert nrmse(sim.data[p_x].squeeze(), ideal_x.squeeze()) < atol_x
Example #2
def _test_temporal_solver(plt, Simulator, seed, neuron_type, tau, f, solver):
    dt = 0.002

    # We cheat a bit here by reusing the training data as the test data. This
    # keeps the unit test simple, since it is more obvious what should happen
    # when comparing the Temporal solver against the default one.
    t = np.arange(0, 0.2, dt)
    stim = np.sin(2 * np.pi * 10 * t)
    function = (f(stim) if tau is None else nengo.Lowpass(tau).filt(f(stim),
                                                                    dt=dt))

    with Network(seed=seed) as model:
        u = nengo.Node(output=nengo.processes.PresentInput(stim, dt))
        x = nengo.Ensemble(100, 1, neuron_type=neuron_type)
        output_ideal = nengo.Node(size_in=1)

        post = dict(n_neurons=500,
                    dimensions=1,
                    neuron_type=nengo.LIFRate(),
                    seed=seed + 1)
        output_temporal = nengo.Ensemble(**post)
        output_default = nengo.Ensemble(**post)

        nengo.Connection(u, output_ideal, synapse=tau, function=f)
        nengo.Connection(u, x, synapse=None)
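
        # The Temporal solver fits decoders against the filtered target
        # time-series (eval_points are taken in time order over the simulated
        # activities), whereas the default solver below fits the static
        # function f from rate-based eval points.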
        nengo.Connection(x,
                         output_temporal,
                         synapse=tau,
                         eval_points=stim[:, None],
                         function=function[:, None],
                         solver=Temporal(synapse=tau, solver=solver))
        nengo.Connection(x,
                         output_default,
                         synapse=tau,
                         eval_points=stim[:, None],
                         function=f,
                         solver=solver)

        p_ideal = nengo.Probe(output_ideal, synapse=None)
        p_temporal = nengo.Probe(output_temporal, synapse=None)
        p_default = nengo.Probe(output_default, synapse=None)

    with Simulator(model, dt) as sim:
        sim.run(t[-1])

    plt.plot(sim.trange(),
             sim.data[p_ideal] - sim.data[p_default],
             label="Default")
    plt.plot(sim.trange(),
             sim.data[p_ideal] - sim.data[p_temporal],
             label="Temporal")
    plt.legend()

    return (nrmse(sim.data[p_default], target=sim.data[p_ideal]) /
            nrmse(sim.data[p_temporal], target=sim.data[p_ideal]))
def test_direct_window(legendre, Simulator, seed, plt):
    theta = 1.0
    T = theta

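    # t_default comes from nengolib.networks.rolling_window (import assumed):
    # the default sample points across the window, normalized to [0, 1].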
    assert np.allclose(t_default, np.linspace(0, 1, 1000))

    with Network() as model:
        stim = nengo.Node(output=lambda t: t)
        rw = RollingWindow(theta, n_neurons=1, dimensions=12,
                           neuron_type=nengo.Direct(), process=None,
                           legendre=legendre)
        assert rw.theta == theta
        assert rw.dt == 0.001
        assert rw.process is None
        assert rw.synapse == nengo.Lowpass(0.1)
        assert rw.input_synapse == nengo.Lowpass(0.1)

        nengo.Connection(stim, rw.input, synapse=None)
        output = rw.add_output(function=lambda w: np.sum(w**3)**2)
        p_output = nengo.Probe(output, synapse=None)

    with Simulator(model) as sim:
        sim.run(T)

    actual = sim.data[p_output].squeeze()
    t = sim.trange()
    ideal = shift(np.cumsum(t**3)**2)

    plt.figure()
    plt.plot(t, actual, label="Output")
    plt.plot(t, ideal, label="Ideal", linestyle='--')
    plt.legend()

    assert nrmse(actual, ideal) < 0.005
Example #4
File: dn.py  Project: goodluckcjj/phd
def analyze(name, t, u, x_hat, x_ideal, C, theta,
            dump_file=True, do_plot=True):
    # print("Radii:", np.max(np.abs(x_hat), axis=0))
    # assert C.shape == (t_samples, order)

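    # Rows of w (and w_ideal) are window readouts at t_samples delays spanning
    # [0, theta]; each row is a time-series of length len(t).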
    w = C.dot(x_hat.T)
    w_ideal = C.dot(x_ideal.T)
    t_samples, order = C.shape
    
    if do_plot:
        top_cmap = sns.color_palette('GnBu_d', t_samples)[::-1]
        fig, ax = plt.subplots(2, 1, sharex=True, figsize=(3.5, 3.5))
        for c, w_i in list(zip(top_cmap, w))[::-1]:
            ax[0].plot(t, w_i, c=c, alpha=0.7)
        target_line, = ax[0].plot(t, u, c='green', linestyle='--', lw=1)
        ax[0].set_ylim(np.min(w), np.max(w) + 1)

        from mpl_toolkits.axes_grid1.inset_locator import inset_axes
        insert = inset_axes(ax[0], width="25%", height=0.3, loc='upper right')
        insert.patch.set_alpha(0.8)
        insert.xaxis.tick_top()
        insert.tick_params(axis='x', labelsize=4)
        insert.tick_params(axis='y', labelsize=4)
        insert.xaxis.set_label_position('top') 
        t_window = np.linspace(0, theta, t_samples)
        e_window = nrmse(w, target=w_ideal, axis=1)
        for i in range(1, t_samples):
            insert.plot([t_window[i-1], t_window[i]],
                        [e_window[i-1], e_window[i]],
                        c=top_cmap[i])
        insert.set_xlabel("Delay Length (s)", size=4)
        insert.set_ylabel("NRMSE", size=4)
        #insert.set_ylim(0, max(e_window))

        bot_cmap = sns.color_palette('bright', order)
        for i in range(order):
            ax[1].plot(t, x_hat[:, i], c=bot_cmap[i], alpha=0.9)
            ax[1].plot(t, x_ideal[:, i], c=bot_cmap[i], linestyle='--', lw=1)

        ax[0].set_title("Delay Network")
        ax[1].set_title("State Vector")
        ax[-1].set_xlabel("Time (s)")

        top_lc = LineCollection(
            len(C) * [[(0, 0)]], lw=1, colors=top_cmap)
        ax[0].legend([target_line, top_lc], ["Input", "Output"],
                     handlelength=3.2, loc='lower right',
                     handler_map={LineCollection: HandlerDashedLines()})

        bot_lc_ideal = LineCollection(
            order * [[(0, 0)]], lw=1, colors=bot_cmap, linestyle='--')
        bot_lc_actual = LineCollection(
            order * [[(0, 0)]], lw=1, colors=bot_cmap)
        ax[1].legend([bot_lc_ideal, bot_lc_actual], ["Ideal", "Actual"],
                     handlelength=3.2, loc='lower right',
                     handler_map={LineCollection: HandlerDashedLines()})

        fig.savefig('%s.pdf' % name, dpi=600, bbox_inches='tight')

    if dump_file:
        np.savez("%s-%s" % (name, time.time()),
                 t=t, u=u, x_hat=x_hat, x_ideal=x_ideal, C=C)

    return nrmse(w.flatten(), target=w_ideal.flatten())
def test_window_example(legendre, Simulator, seed, plt):
    theta = 0.1
    n_neurons = 1000
    d = 6
    high = 10

    T = 1.0
    dt = 0.002
    tau_probe = None

    # we set the radii here for testing on nengo<2.4.0
    # (see warning in RollingWindow._make_core)
    radii = 0.3

    with Network(seed=seed) as model:
        stim = nengo.Node(output=WhiteSignal(T, high=high, seed=seed))

        rw_rate = RollingWindow(
            theta=theta, n_neurons=n_neurons, dimensions=d, radii=radii,
            neuron_type=nengo.LIFRate(), dt=dt, process=stim.output,
            legendre=legendre)
        assert isinstance(rw_rate.state.eval_points, EvalPoints)
        assert isinstance(rw_rate.state.encoders, Encoders)

        rw_drct = RollingWindow(
            theta=theta, n_neurons=1, dimensions=d, radii=radii,
            neuron_type=nengo.Direct(), dt=dt, process=None,
            legendre=legendre)

        def function(w):
            return -np.max(np.abs(w)), abs(w[0]*w[-1])

        nengo.Connection(stim, rw_rate.input, synapse=None)
        nengo.Connection(stim, rw_drct.input, synapse=None)

        delay_rate = rw_rate.add_output(t=1)
        delay_drct = rw_drct.output  # rw_drct.add_output('delay', t=1)

        output_rate = rw_rate.add_output(function=function)
        output_drct = rw_drct.add_output(function=function)

        p_stim = nengo.Probe(stim, synapse=tau_probe)
        p_delay_rate = nengo.Probe(delay_rate, synapse=tau_probe)
        p_delay_drct = nengo.Probe(delay_drct, synapse=tau_probe)
        p_output_rate = nengo.Probe(output_rate, synapse=tau_probe)
        p_output_drct = nengo.Probe(output_drct, synapse=tau_probe)
        p_state_rate = nengo.Probe(rw_rate.state, synapse=tau_probe)
        p_state_drct = nengo.Probe(rw_drct.state, synapse=tau_probe)

    with Simulator(model, dt=dt) as sim:
        sim.run(T)

    plt.subplot(3, 1, 1)
    plt.plot(sim.trange(), sim.data[p_stim], label="Input")
    plt.plot(sim.trange(), sim.data[p_delay_rate], label="Rate")
    plt.plot(sim.trange(), sim.data[p_delay_drct], label="Direct",
             linestyle='--')
    plt.legend()

    plt.subplot(3, 1, 2)
    plt.plot(sim.trange(), sim.data[p_output_rate], label="Rate")
    plt.plot(sim.trange(), sim.data[p_output_drct], label="Direct",
             linestyle='--')
    plt.legend()

    plt.subplot(3, 1, 3)
    plt.plot(sim.trange(), sim.data[p_state_rate], label="Rate", lw=1)
    plt.plot(sim.trange(), sim.data[p_state_drct], label="Direct",
             linestyle='--')

    assert nrmse(sim.data[p_delay_rate], sim.data[p_delay_drct]) < 0.05
    assert nrmse(sim.data[p_output_rate], sim.data[p_output_drct]) < 0.2
    assert nrmse(sim.data[p_state_rate], sim.data[p_state_drct]) < 0.05
Example #6
def go(seed, theta):
    # Task parameters
    T_train = 10
    T_total = 15
    dt = 0.005

    amplitude = 1
    process = nengo.processes.WhiteSignal(T_total, high=1, rms=0.5, y0=0)

    # Fixed model parameters
    n = 500
    rng = np.random.RandomState(seed)
    ens_kwargs = dict(  # neuron parameters
        n_neurons=n,
        dimensions=1,
        neuron_type=nengolib.neurons.Tanh(),
        intercepts=[-1]*n,  # intercepts are irrelevant for Tanh
        seed=seed,
    )

    # Hyper-parameters
    tau = 0.01                  # lowpass time-constant (10ms in [1])
    tau_learn = None            # filter for error / learning (needed for spiking)
    tau_probe = None            # filter for readout (needed for spiking)
    learning_rate = 0.001       # 1 in [1]
    g = 1.5 # / 400             # 1.5 in [1] (scale by firing rates for spiking)
    g_in = 1 # tau / amplitude  # scale the input encoders (usually 1)
    g_out = 1.0                 # scale the recurrent encoders (usually 1)
    q = 6                       # NEF solution (with linear units)
    
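    # Four conditions are built below: xC/zC (Classic-FORCE: online RLS on
    # the readout), xR ("No-FORCE": an untrained reservoir with an offline
    # readout), xD (the target-generating network, driven by the ideal delayed
    # signal z), and xF (full-FORCE: online RLS on the recurrent weights so
    # that sF mimics xD's pre-filter signal sD).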
    with Network(seed=seed) as model:
        u = nengo.Node(output=process)

        z = nengo.Node(size_in=1)
        nengo.Connection(u, z, synapse=nengolib.synapses.DiscreteDelay(int(theta/dt)))

        ref = nengo.Node(size_in=1)
        nengo.Connection(u, ref, synapse=nengolib.synapses.PadeDelay(theta, order=q))

    # Initial weights
    e_in = g_in * rng.uniform(-1, +1, (n, 1))  # fixed encoders for f_in (u_in)
    e_out = g_out * rng.uniform(-1, +1, (n, 1))  # fixed encoders for f_out (u)
    JD = rng.randn(n, n) * g / np.sqrt(n)  # target-generating weights (variance g^2/n)
    
    with model:
        xC = nengo.Ensemble(**ens_kwargs)
        sC = nengo.Node(size_in=n)  # pre filter
        eC = nengo.Node(size_in=1, output=lambda t, e: e if t < T_train else 0)
        zC = nengo.Node(size_in=1)  # learned output

        nengo.Connection(u, sC, synapse=None, transform=e_in)
        nengo.Connection(sC, xC.neurons, synapse=tau)
        nengo.Connection(xC.neurons, sC, synapse=None, transform=JD)  # chaos
        connC = nengo.Connection(
            xC.neurons, zC, synapse=None, transform=np.zeros((1, n)),
            learning_rule_type=RLS(learning_rate=learning_rate, pre_synapse=tau_learn))
        nengo.Connection(zC, sC, synapse=None, transform=e_out)

        nengo.Connection(zC, eC, synapse=None)  # actual
        nengo.Connection(z, eC, synapse=None, transform=-1)  # ideal
        nengo.Connection(eC, connC.learning_rule, synapse=tau_learn)
        
    with model:    
        xR = nengo.Ensemble(**ens_kwargs)
        sR = nengo.Node(size_in=n)  # pre filter

        nengo.Connection(u, sR, synapse=None, transform=e_in)
        # nengo.Connection(z, sR, synapse=None, transform=e_out)  # <- don't reencode the input!
        nengo.Connection(sR, xR.neurons, synapse=tau)
        nengo.Connection(xR.neurons, sR, synapse=None, transform=JD)
        
    with model:
        xD = nengo.Ensemble(**ens_kwargs)
        sD = nengo.Node(size_in=n)  # pre filter

        nengo.Connection(u, sD, synapse=None, transform=e_in)
        nengo.Connection(z, sD, synapse=None, transform=e_out)
        nengo.Connection(sD, xD.neurons, synapse=tau)
        nengo.Connection(xD.neurons, sD, synapse=None, transform=JD)

    with model:
        xF = nengo.Ensemble(**ens_kwargs)
        sF = nengo.Node(size_in=n)  # pre filter
        eF = nengo.Node(size_in=n, output=lambda t, e: e if t < T_train else np.zeros_like(e))

        nengo.Connection(u, sF, synapse=None, transform=e_in)
        nengo.Connection(sF, xF.neurons, synapse=tau)
        connF = nengo.Connection(
            xF.neurons, sF, synapse=None, transform=np.zeros((n, n)),
            learning_rule_type=RLS(learning_rate=learning_rate, pre_synapse=tau_learn))

        nengo.Connection(sF, eF, synapse=None)  # actual
        nengo.Connection(sD, eF, synapse=None, transform=-1)  # ideal
        nengo.Connection(eF, connF.learning_rule, synapse=tau_learn)   
        
    with model:
        # Probes
        p_z = nengo.Probe(z, synapse=tau_probe)
        p_zC = nengo.Probe(zC, synapse=tau_probe)
        p_xF = nengo.Probe(xF.neurons, synapse=tau_probe)
        p_xR = nengo.Probe(xR.neurons, synapse=tau_probe)
        p_ref = nengo.Probe(ref, synapse=tau_probe)

    with nengo.Simulator(model, dt=dt, seed=seed) as sim:
        sim.run(T_total)
        
    # We do the readout training for full-FORCE offline, since this gives better
    # performance without affecting anything else
    t_train = sim.trange() < T_train
    t_test = sim.trange() >= T_train

    solver = nengo.solvers.LstsqL2(reg=1e-2)
    wF, _ = solver(sim.data[p_xF][t_train], sim.data[p_z][t_train])
    zF = sim.data[p_xF].dot(wF)

    wR, _ = solver(sim.data[p_xR][t_train], sim.data[p_z][t_train])
    zR = sim.data[p_xR].dot(wR)

    return (
        ('Classic-FORCE', nrmse(sim.data[p_zC][t_test], target=sim.data[p_z][t_test])),
        ('Full-FORCE', nrmse(zF[t_test], target=sim.data[p_z][t_test])),
        ('No-FORCE', nrmse(zR[t_test], target=sim.data[p_z][t_test])),
        ('NEF (n=%d)' % q, nrmse(sim.data[p_ref][t_test], target=sim.data[p_z][t_test])),
    )
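
# Hypothetical driver (not in the original): go() returns (label, NRMSE) pairs
# evaluated over the held-out test window, so a run can be summarized as:
if __name__ == '__main__':
    for label, error in go(seed=0, theta=0.1):
        print('%s: test NRMSE = %.3f' % (label, error))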
Example #7
def figure_delay_example(targets):
    npfile = np.load(DELAY_EXAMPLE_SIM)
    theta = npfile['theta']
    dt = npfile['dt']
    t = npfile['t']
    u = npfile['u']
    x = npfile['x']
    a = npfile['a']
    y = npfile['y']
    T = t[-1]

    ideal = ideal_delay(u, theta, dt)
    logging.info("Paper constant: NRMSE: %s", nrmse(y, target=ideal))

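    # Build a pseudo-raster for the bottom panel: project the decoded state
    # onto random unit-length encoders, clip at the intercept, normalize each
    # cell's response to [0, 1], and sort cells by the time of their peak.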
    n_encoders = 1000
    rng = np.random.RandomState(seed=0)
    encoders = sphere.sample(n_encoders, x.shape[1], rng=rng)
    sims = np.dot(x, encoders.T)
    order = np.argsort(np.argmax(sims, axis=0))
    intercept = -1
    sims = sims.clip(intercept)
    sims -= np.min(sims, axis=0)
    sims /= np.max(sims, axis=0)
    a = sims[:, order]
    assert np.isfinite(a).all()

    sample_rate = 50  # downsample PDFs to avoid reader lag
    t = t[::sample_rate]
    u = u[::sample_rate]
    x = x[::sample_rate]
    a = a[::sample_rate]
    y = y[::sample_rate]
    ideal = ideal[::sample_rate]

    cmap = sns.diverging_palette(220, 20, as_cmap=True)

    with sns.axes_style('ticks'):
        with sns.plotting_context('paper', font_scale=2.8):
            f, (ax1, ax2, ax3) = pylab.subplots(3, 1, figsize=(18, 16))

            cpal = sns.color_palette(None, 2)
            ax1.plot(t, u, label=r"$u(t)$", c=cpal[0], lw=4, alpha=0.8)
            ax1.plot(t,
                     ideal,
                     label=r"$u(t - %s)$" % theta,
                     c=cpal[0],
                     lw=4,
                     linestyle='--',
                     zorder=2)  # on top
            ax1.plot(t, y, c=cpal[1], lw=4, label=r"$y(t)$", zorder=1)
            ax1.set_xlim(t[0], t[-1])
            ax1.set_ylim(-1, 1)
            ax1.set_xticks([])
            ax1.set_ylabel("Input / Output", labelpad=20)
            ax1.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)

            for i in range(x.shape[1]):
                ax2.plot(t,
                         x[:, -i],
                         label=r"$\hat{x}_%d(t)$" % (i + 1),
                         lw=2,
                         alpha=0.8)
            ax2.set_xlim(t[0], t[-1])
            ax2.set_ylim(-1, 1)
            ax2.set_xticks([])
            ax2.set_ylabel("Decoded State", labelpad=20)
            ax2.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)

            ax3.imshow(a.T, cmap=cmap, aspect='auto', interpolation='none')
            ax3.set_xlim(0, len(t))
            ax3.set_xticklabels(np.linspace(0, T, 9))
            ax3.set_xlabel("Time [s]", labelpad=20)
            ax3.set_ylabel(r"Cell \#", labelpad=20)

            segs = 100
            lc = LineCollection(segs * [[(0, 0)]],
                                lw=10,
                                colors=cmap(np.linspace(0, 1, segs)))
            ax3.legend([lc], ['Activity'],
                       handlelength=2,
                       handler_map={type(lc): HandlerDashedLines()},
                       bbox_to_anchor=(1.02, 1),
                       loc=2,
                       borderaxespad=0.)

            sns.despine(offset=15)

            savefig(targets[0])
Example #8
def figure_principle3(targets):
    theta = 0.1
    tau = 0.1 * theta
    lmbda = tau
    orders = range(6, 28)

    freqs = np.linspace(0.1 / theta, 16 / theta, 1000)
    s = 2.j * np.pi * freqs

    y = np.exp(-theta * s)
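
    # Hinvs is H^{-1}(s) for a lowpass cascaded with a pure delay of lmbda;
    # evaluating a mapped system F at H^{-1}(s) gives the frequency response
    # realized when F is implemented through that synapse.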
    Hinvs = (tau * s + 1) * np.exp(lmbda * s)

    cmap_lamb = sns.color_palette("GnBu_d", len(orders))[::-1]
    cmap_ignore = sns.color_palette("OrRd_d", len(orders))[::-1]

    data = np.empty((2, len(DISCRETE_DTS), DISCRETE_SEEDS))

    for seed in range(DISCRETE_SEEDS):
        for i, dt in enumerate(DISCRETE_DTS):
            npfile = np.load(DISCRETE_SIM % (seed, i))
            assert np.allclose(npfile['dt'], dt)
            delay = npfile['delay']
            # t = npfile['t']
            stim = npfile['stim']
            disc = npfile['disc']
            cont = npfile['cont']

            target = ideal_delay(stim, delay, dt)
            e_disc = nrmse(disc, target=target)
            e_cont = nrmse(cont, target=target)

            data[0, i, seed] = e_disc
            data[1, i, seed] = e_cont

    i = np.where(DISCRETE_DTS == 0.001)[0][0]
    assert np.allclose(DISCRETE_DTS[i], 0.001)
    e_disc = np.mean(data, axis=2)[0, i]
    e_cont = np.mean(data, axis=2)[1, i]
    improvement = (e_cont - e_disc) / e_cont * 100
    logging.info("Paper constant: Improvement at 1 ms: %s (%s -> %s)",
                 improvement, e_cont, e_disc)

    with sns.axes_style('ticks'):
        with sns.plotting_context('paper', font_scale=2.8):
            f, (ax1, ax2) = pylab.subplots(1, 2, figsize=(18, 5))

            ax1.set_title("Discrete Lowpass Improvement").set_y(1.05)

            for i, condition, cpal, marker in ((1, 'Principle 3',
                                                sns.color_palette("OrRd_d"),
                                                'X'),
                                               (0, 'Extension',
                                                sns.color_palette("GnBu_d"),
                                                'o')):
                sns.tsplot(data[i].T,
                           1000 * DISCRETE_DTS,
                           condition=condition,
                           color=cpal,
                           marker=marker,
                           markersize=15,
                           lw=3,
                           ci=95,
                           alpha=0.7,
                           ax=ax1)

            ax1.vlines([1.0],
                       np.min(data[0]),
                       2.0,
                       linestyle='--',
                       color='black',
                       lw=4,
                       alpha=0.7,
                       zorder=0)

            ax1.set_xlabel("Discrete Time-step [ms]", labelpad=20)
            ax1.set_ylabel("Absolute Error", labelpad=20)
            ax1.set_xlim(0, 1000 * DISCRETE_DTS[-1] + 0.1)

            ax2.set_title("Delayed Lowpass Improvement").set_y(1.05)

            for i, q in enumerate(orders):
                sys = PadeDelay(theta, order=q)
                mapped = ss2sim(sys, Lowpass(tau), dt=None)
                lambert = lambert_delay(theta, lmbda, tau, q - 1, q)

                y_lamb = lambert(Hinvs)
                y_ignore = mapped(Hinvs)

                ax2.semilogy(freqs * theta,
                             abs(y - y_ignore),
                             lw=2,
                             alpha=0.8,
                             zorder=len(orders) - i,
                             c=cmap_ignore[i])
                ax2.semilogy(freqs * theta,
                             abs(y - y_lamb),
                             lw=2,
                             alpha=0.8,
                             zorder=len(orders) - i,
                             c=cmap_lamb[i])

            lc_ignore = LineCollection(len(orders) * [[(0, 0)]],
                                       lw=10,
                                       colors=cmap_ignore)
            lc_lamb = LineCollection(len(orders) * [[(0, 0)]],
                                     lw=10,
                                     colors=cmap_lamb)
            ax2.legend([lc_ignore, lc_lamb], ['Principle 3', 'Extension'],
                       handlelength=2,
                       handler_map={LineCollection: HandlerDashedLines()})

            ax2.set_xlabel(r"Frequency $\times \, \theta$ [Hz $\times$ s]",
                           labelpad=20)

            sns.despine(offset=15)

            savefig(targets[0])
Example #9
def figure_lambert(targets):
    npfile = np.load(LAMBERT_SIM)
    delay = npfile['delay']
    dt = npfile['dt']
    t = npfile['t']
    stim = npfile['stim']
    delayed = npfile['delayed']
    lowpass = npfile['lowpass']
    dexp = npfile['dexp']

    target = ideal_delay(stim, delay, dt)

    e_delayed = nrmse(delayed, target=target)
    e_lowpass = nrmse(lowpass, target=target)
    e_dexp = nrmse(dexp, target=target)

    improvement = (e_lowpass - e_delayed) / e_lowpass * 100
    logging.info("Paper constant: Lambert improvement: %s", improvement)
    logging.info("Paper constant: Delayed NRMSE: %s", e_delayed)
    logging.info("Paper constant: Lowpass NRMSE: %s", e_lowpass)
    logging.info("Paper constant: Double Exp NRMSE: %s", e_dexp)

    sample_rate = 100
    t = t[::sample_rate]
    stim = stim[::sample_rate]
    delayed = delayed[::sample_rate]
    lowpass = lowpass[::sample_rate]
    dexp = dexp[::sample_rate]
    target = target[::sample_rate]

    tau_over_theta = 0.1
    lambda_over_tau = 1.0
    max_freq_times_theta = 4.0
    theta = 0.1  # <-- the figure is identical for any theta (axes are scaled by theta)
    tau = tau_over_theta * theta
    lmbda = lambda_over_tau * tau
    max_freq = max_freq_times_theta / theta
    tau2 = tau / 5  # TODO: parameters copied from delayed_synapse()
    q = 6
    assert np.allclose(tau, 0.01)
    assert np.allclose(lmbda, 0.01)

    freqs = np.linspace(0, max_freq, 200)
    s = 2.j * np.pi * freqs
    axis = freqs * theta  # scale-invariant axis

    lw = 3
    alpha = 0.8
    cmap = sns.color_palette(None, 4)

    F_lamb = lambert_delay(theta, lmbda, tau, q - 1, q)
    F_low = ss2sim(PadeDelay(theta, order=q), Lowpass(tau), dt=None)
    F_alpha = ss2sim(PadeDelay(theta, order=q), DoubleExp(tau, tau2), dt=None)

    y_low = F_low(tau * s + 1)
    y_lamb = F_lamb((tau * s + 1) * np.exp(lmbda * s))
    y_alpha = F_alpha((tau * s + 1) * (tau2 * s + 1))
    y = np.exp(-theta * s)

    # Compute where lmbda*s + lmbda/tau is within the principal branch
    tx = lmbda * 2 * np.pi * freqs
    st = (lmbda / tau > -(tx / np.tan(tx))) & (tx < np.pi) | (tx == 0)
    p = lmbda * s[st] + lmbda / tau
    assert np.allclose(lambertw(p * np.exp(p)), p)

    with sns.axes_style('ticks'):
        with sns.plotting_context('paper', font_scale=2.8):
            pylab.figure(figsize=(18, 5))
            gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1.618])
            gs.update(wspace=0.3)
            ax1 = plt.subplot(gs[1])
            ax2 = plt.subplot(gs[0])

            ax1.set_title(r"$0.1\,$s Delay of $15\,$Hz White Noise").set_y(
                1.05)
            ax1.plot(t, target, lw=4, c=cmap[0], zorder=4, linestyle='--')
            ax1.plot(t, lowpass, alpha=alpha, c=cmap[1], zorder=2)
            ax1.plot(t, dexp, alpha=alpha, c=cmap[3], zorder=2)
            ax1.plot(t, delayed, alpha=alpha, c=cmap[2], zorder=3)
            ax1.set_ylim(-0.5, 0.5)
            ax1.set_xlabel("Time [s]", labelpad=20)
            ax1.set_ylabel("Output")

            ax2.set_title("Delay Accuracy").set_y(1.05)
            ax2.plot(axis,
                     np.zeros_like(freqs),
                     lw=lw,
                     c=cmap[0],
                     zorder=4,
                     linestyle='--',
                     label=r"Ideal")
            ax2.plot(axis,
                     abs(y - y_low),
                     lw=lw,
                     alpha=alpha,
                     c=cmap[1],
                     zorder=3,
                     label=r"Lowpass")
            ax2.plot(axis,
                     abs(y - y_alpha),
                     lw=lw,
                     alpha=alpha,
                     c=cmap[3],
                     zorder=3,
                     label=r"Double-exponential")
            ax2.plot(axis,
                     abs(y - y_lamb),
                     lw=lw,
                     alpha=alpha,
                     c=cmap[2],
                     zorder=3,
                     label=r"Delayed Lowpass")

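            # NB: `s` is reused below as a scalar (the arrow-marker geometry),
            # shadowing the Laplace-domain array defined earlier.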
            s = 0.8
            pts = np.asarray([[1.5, 0], [1.5 - s, -s], [1.5 + s, -s]])
            ax2.add_patch(Polygon(pts, closed=True, color='black'))

            ax2.set_xlabel(r"Frequency $\times \, \theta$ [Hz $\times$ s]",
                           labelpad=20)
            ax2.set_ylabel(r"Absolute Error", labelpad=20)

            ax2.legend(loc='upper left',
                       frameon=True).get_frame().set_alpha(0.8)

            sns.despine(offset=15)

            savefig(targets[0])
Example #10
def _test_RLS_network(Simulator, seed, dims, lrate, neuron_type, tau, T_train,
                      T_test, tols):
    # Input is a scalar sinusoid with given frequency
    n_neurons = 100
    freq = 5

    # Learn a linear transformation within T_train seconds
    transform = np.random.RandomState(seed=seed).randn(dims, 1)
    lr = RLS(learning_rate=lrate, pre_synapse=tau)

    with Network(seed=seed) as model:
        u = nengo.Node(output=lambda t: np.sin(freq * 2 * np.pi * t))
        x = nengo.Ensemble(n_neurons, 1, neuron_type=neuron_type)
        y = nengo.Node(size_in=dims)
        y_on = nengo.Node(size_in=dims)
        y_off = nengo.Node(size_in=dims)

        e = nengo.Node(size_in=dims,
                       output=lambda t, e: e
                       if t < T_train else np.zeros_like(e))

        nengo.Connection(u, y, synapse=None, transform=transform)
        nengo.Connection(u, x, synapse=None)
        conn_on = nengo.Connection(x,
                                   y_on,
                                   synapse=None,
                                   learning_rule_type=lr,
                                   function=lambda _: np.zeros(dims))
        nengo.Connection(y, e, synapse=None, transform=-1)
        nengo.Connection(y_on, e, synapse=None)
        nengo.Connection(e, conn_on.learning_rule, synapse=tau)

        nengo.Connection(x, y_off, synapse=None, transform=transform)

        p_y = nengo.Probe(y, synapse=tau)
        p_y_on = nengo.Probe(y_on, synapse=tau)
        p_y_off = nengo.Probe(y_off, synapse=tau)
        p_inv_gamma = nengo.Probe(conn_on.learning_rule, 'inv_gamma')

    with Simulator(model) as sim:
        sim.run(T_train + T_test)

    # Check _descstr
    ops = [op for op in sim.model.operators if isinstance(op, SimRLS)]
    assert len(ops) == 1
    assert str(ops[0]).startswith('SimRLS')

    test = sim.trange() >= T_train

    on_versus_off = nrmse(sim.data[p_y_on][test],
                          target=sim.data[p_y_off][test])

    on_versus_ideal = nrmse(sim.data[p_y_on][test], target=sim.data[p_y][test])

    off_versus_ideal = nrmse(sim.data[p_y_off][test],
                             target=sim.data[p_y][test])

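    # RLS tracks the inverse of the activity correlation matrix gamma;
    # reconstruct gamma from the probed inverse and compare it, up to scale,
    # with the analytic A.T.dot(A) + I / learning_rate.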
    A = get_activities(sim.model, x, np.linspace(-1, 1, 1000)[:, None])
    gamma_off = A.T.dot(A) + np.eye(n_neurons) / lr.learning_rate
    gamma_on = inv(sim.data[p_inv_gamma][-1])

    gamma_off /= np.linalg.norm(gamma_off)
    gamma_on /= np.linalg.norm(gamma_on)
    gamma_diff = nrmse(gamma_on, target=gamma_off)

    assert on_versus_off < tols[0]
    assert on_versus_ideal < tols[1]
    assert off_versus_ideal < tols[2]
    assert gamma_diff < tols[3]