Example 1
    def do_trial(name, seed, length=2000, dt=0.001, tau_probe=0.02,
                 sanity=False, **kwargs):
        # Note: depends on variables from the enclosing scope
        # (factory, C, model, u, p_u, P, sys, theta, power, freq, post_fixture)

        process = nengo.processes.WhiteSignal(
            period=length*dt, rms=power, high=freq, y0=0, seed=seed)

        test_u = process.run_steps(length, dt=dt)
        x_ideal = sys.X.filt(test_u, dt=dt)

        if sanity:
            analyze("ideal-%s" % name, 
                    t=process.ntrange(length, dt=dt),
                    u=test_u,
                    x_hat=x_ideal,
                    x_ideal=x_ideal,
                    C=C,
                    theta=theta,
                    dump_file=False,
                    do_plot=False)
            
        u.output = process

        with factory(network=model, dt=dt) as sim:
            sim.run(length*dt)
            if post_fixture:
                post_fixture(sim)

        assert np.allclose(test_u, np.asarray(sim.data[p_u]))

        # Use discrete principle 3, offline, to get x_hat
        # from the unfiltered spikes representing x.
        # This is analogous to probing the PSC, pre-encoding.
        syn_probe = Lowpass(tau_probe)
        map_out = ss2sim(sys, synapse=syn_probe, dt=dt)
        x_raw = np.asarray([sim.data[p] for p in P]).squeeze()
        f = map_out.A.dot(x_raw) + map_out.B.dot(test_u.T)
        x_hat = syn_probe.filt(f, axis=1, dt=dt).T

        return analyze(
            name=name, t=sim.trange(), u=test_u,
            x_hat=x_hat, x_ideal=x_ideal, C=C,
            theta=theta, **kwargs)
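
For reference, Example 2 below invokes this routine from its experiment loop; a minimal sketch of that call (dump_file is forwarded to analyze through **kwargs, and the return value is recorded as the trial's NRMSE):

    error = do_trial(name="scratch-demo-DN-0-1", seed=1, dump_file=False)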
Example 2
def go(name, tau, factory, recurrent_solver=Default,
       pre_fixture=None, post_fixture=None):

    set_style()
 
    theta = 0.1
    order = 3
    freq = 3
    power = 1.0  # chosen to keep radii within [-1, 1]

    # print("PadeDelay(%s, %s) => %f%% error @ %sHz" % (
    #     theta, order, 100*abs(pade_delay_error(theta*freq, order=order)), freq))
    pd = PadeDelay(theta=theta, order=order)

    # Heuristic for normalizing state so that each dimension is ~[-1, +1]
    rz = Balanced()(pd, radii=1./(np.arange(len(pd))+1))
    sys = rz.realization

    # Compute matrix to transform from state (x) -> sampled window (u)
    t_samples = 100
    C = np.asarray([readout(len(pd), r)
                    for r in np.linspace(0, 1, t_samples)]).dot(rz.T)
    assert C.shape == (t_samples, len(sys))


    n_neurons = 128  # per dimension
    map_hw = ss2sim(sys, synapse=Lowpass(tau), dt=None)  # analog mapping
    assert np.allclose(map_hw.A, tau*sys.A + np.eye(len(sys)))
    assert np.allclose(map_hw.B, tau*sys.B)
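    # Principle 3 with a continuous lowpass 1/(tau*s + 1): feeding the synapse
    # f(x, u) = (tau*A + I) x + tau*B u yields dx/dt = A x + B u, which is
    # exactly the relationship the two asserts above verify.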

    with nengo.Network() as model:
        if pre_fixture is not None:
            pre_fixture(model)

        u = nengo.Node(output=0, label='u')
        p_u = nengo.Probe(u, synapse=None)
        
        # This is needed because a single node can't connect to multiple
        # different ensembles. We need a separate node for each ensemble.
        Bu = [nengo.Node(output=lambda _, u, b_i=map_hw.B[i].squeeze(): b_i*u,
                         size_in=1, label='Bu[%d]' % i)
              for i in range(len(sys))]
        
        X = []
        for i in range(len(sys)):
            ens = nengo.Ensemble(
                n_neurons=n_neurons, dimensions=1, label='X[%d]' % i)

            X.append(ens)
     
        P = []
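        # Element-wise wiring of the mapped system: each X[i] receives
        # map_hw.B[i] * u via Bu[i] and map_hw.A[i, j] * x_j from every X[j],
        # all through the lowpass synapse tau; P collects unfiltered probes of
        # each state dimension for the offline decode in do_trial below.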
        for i in range(len(sys)):
            nengo.Connection(u, Bu[i], synapse=None)
            nengo.Connection(Bu[i], X[i], synapse=tau)
            for j in range(len(sys)):
                nengo.Connection(X[j], X[i], synapse=tau,
                                 function=lambda x_j, a_ij=map_hw.A[i, j]: a_ij*x_j,
                                 solver=recurrent_solver)
            P.append(nengo.Probe(X[i], synapse=None))


    def do_trial(name, seed, length=2000, dt=0.001, tau_probe=0.02,
                 sanity=False, **kwargs):
        # Note: depends on variables from the enclosing scope
        # (factory, C, model, u, p_u, P, sys, theta, power, freq, post_fixture)

        process = nengo.processes.WhiteSignal(
            period=length*dt, rms=power, high=freq, y0=0, seed=seed)

        test_u = process.run_steps(length, dt=dt)
        x_ideal = sys.X.filt(test_u, dt=dt)

        if sanity:
            analyze("ideal-%s" % name, 
                    t=process.ntrange(length, dt=dt),
                    u=test_u,
                    x_hat=x_ideal,
                    x_ideal=x_ideal,
                    C=C,
                    theta=theta,
                    dump_file=False,
                    do_plot=False)
            
        u.output = process

        with factory(network=model, dt=dt) as sim:
            sim.run(length*dt)
            if post_fixture:
                post_fixture(sim)

        assert np.allclose(test_u, np.asarray(sim.data[p_u]))

        # Use discrete principle 3, offline, to get x_hat
        # from the unfiltered spikes representing x.
        # This is analogous to probing the PSC, pre-encoding.
        syn_probe = Lowpass(tau_probe)
        map_out = ss2sim(sys, synapse=syn_probe, dt=dt)
        x_raw = np.asarray([sim.data[p] for p in P]).squeeze()
        f = map_out.A.dot(x_raw) + map_out.B.dot(test_u.T)
        x_hat = syn_probe.filt(f, axis=1, dt=dt).T

        return analyze(
            name=name, t=sim.trange(), u=test_u,
            x_hat=x_hat, x_ideal=x_ideal, C=C,
            theta=theta, **kwargs)


    data = defaultdict(list)
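    # 25 trials x 10 test cases; the value returned by do_trial (via analyze)
    # is recorded as the NRMSE for each (trial, seed) pair.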
    for trial in range(25):
        for seed in range(1, 11):
            data['Trial'].append(trial)
            data['Test Case (#)'].append(seed)
            data['NRMSE'].append(
                do_trial(name="scratch-%s-DN-%d-%d" % (name, trial, seed),
                         seed=seed, dump_file=False))

    df = DataFrame(data)
    df.to_pickle(datapath("%s-delay-network.pkl" % name))

    return bs.bootstrap(np.asarray(df['NRMSE']),
                        stat_func=bs_stats.mean, alpha=1-0.95)  # 95% CI
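
A minimal invocation sketch, assuming the reference nengo.Simulator is an acceptable factory (it takes the network= and dt= keyword arguments used inside go) and that tau = 0.1 s is a suitable synaptic time constant; note this runs 250 simulated trials of 2 s each:

ci = go(name="reference", tau=0.1, factory=nengo.Simulator)
print(ci)  # bootstrapped 95% confidence interval of the mean NRMSE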
Example 3
def figure_lambert(targets):
    npfile = np.load(LAMBERT_SIM)
    delay = npfile['delay']
    dt = npfile['dt']
    t = npfile['t']
    stim = npfile['stim']
    delayed = npfile['delayed']
    lowpass = npfile['lowpass']
    dexp = npfile['dexp']

    target = ideal_delay(stim, delay, dt)

    e_delayed = nrmse(delayed, target=target)
    e_lowpass = nrmse(lowpass, target=target)
    e_dexp = nrmse(dexp, target=target)

    improvement = (e_lowpass - e_delayed) / e_lowpass * 100
    logging.info("Paper constant: Lambert improvement: %s", improvement)
    logging.info("Paper constant: Delayed NRMSE: %s", e_delayed)
    logging.info("Paper constant: Lowpass NRMSE: %s", e_lowpass)
    logging.info("Paper constant: Double Exp NRMSE: %s", e_dexp)

    sample_rate = 100
    t = t[::sample_rate]
    stim = stim[::sample_rate]
    delayed = delayed[::sample_rate]
    lowpass = lowpass[::sample_rate]
    dexp = dexp[::sample_rate]
    target = target[::sample_rate]

    tau_over_theta = 0.1
    lambda_over_tau = 1.0
    max_freq_times_theta = 4.0
    theta = 0.1  # the plot is identical for any theta (the axis below is scale-invariant)
    tau = tau_over_theta * theta
    lmbda = lambda_over_tau * tau
    max_freq = max_freq_times_theta / theta
    tau2 = tau / 5  # TODO: parameters copied from delayed_synapse()
    q = 6
    assert np.allclose(tau, 0.01)
    assert np.allclose(lmbda, 0.01)

    freqs = np.linspace(0, max_freq, 200)
    s = 2.j * np.pi * freqs
    axis = freqs * theta  # scale-invariant axis

    lw = 3
    alpha = 0.8
    cmap = sns.color_palette(None, 4)

    F_lamb = lambert_delay(theta, lmbda, tau, q - 1, q)
    F_low = ss2sim(PadeDelay(theta, order=q), Lowpass(tau), dt=None)
    F_alpha = ss2sim(PadeDelay(theta, order=q), DoubleExp(tau, tau2), dt=None)

    y_low = F_low(tau * s + 1)
    y_lamb = F_lamb((tau * s + 1) * np.exp(lmbda * s))
    y_alpha = F_alpha((tau * s + 1) * (tau2 * s + 1))
    y = np.exp(-theta * s)
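    # The evaluations above probe each mapped system at the inverse transfer
    # function of its synapse, giving the frequency response it realizes when
    # driving that synapse; all are compared against the ideal delay
    # y = exp(-theta * s).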

    # Compute where lmbda*s + lmbda/tau lies within the principal branch
    # of the Lambert W function
    tx = lmbda * 2 * np.pi * freqs
    st = (lmbda / tau > -(tx / np.tan(tx))) & (tx < np.pi) | (tx == 0)
    p = lmbda * s[st] + lmbda / tau
    assert np.allclose(lambertw(p * np.exp(p)), p)
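    # The principal branch W_0 of the Lambert W function satisfies
    # W_0(p * exp(p)) = p only for p in the range of that branch, which is
    # what the assert checks for the frequencies retained by `st`.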

    with sns.axes_style('ticks'):
        with sns.plotting_context('paper', font_scale=2.8):
            pylab.figure(figsize=(18, 5))
            gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1.618])
            gs.update(wspace=0.3)
            ax1 = plt.subplot(gs[1])
            ax2 = plt.subplot(gs[0])

            ax1.set_title(r"$0.1\,$s Delay of $15\,$Hz White Noise").set_y(
                1.05)
            ax1.plot(t, target, lw=4, c=cmap[0], zorder=4, linestyle='--')
            ax1.plot(t, lowpass, alpha=alpha, c=cmap[1], zorder=2)
            ax1.plot(t, dexp, alpha=alpha, c=cmap[3], zorder=2)
            ax1.plot(t, delayed, alpha=alpha, c=cmap[2], zorder=3)
            ax1.set_ylim(-0.5, 0.5)
            ax1.set_xlabel("Time [s]", labelpad=20)
            ax1.set_ylabel("Output")

            ax2.set_title("Delay Accuracy").set_y(1.05)
            ax2.plot(axis,
                     np.zeros_like(freqs),
                     lw=lw,
                     c=cmap[0],
                     zorder=4,
                     linestyle='--',
                     label=r"Ideal")
            ax2.plot(axis,
                     abs(y - y_low),
                     lw=lw,
                     alpha=alpha,
                     c=cmap[1],
                     zorder=3,
                     label=r"Lowpass")
            ax2.plot(axis,
                     abs(y - y_alpha),
                     lw=lw,
                     alpha=alpha,
                     c=cmap[3],
                     zorder=3,
                     label=r"Double-exponential")
            ax2.plot(axis,
                     abs(y - y_lamb),
                     lw=lw,
                     alpha=alpha,
                     c=cmap[2],
                     zorder=3,
                     label=r"Delayed Lowpass")

            tri = 0.8
            pts = np.asarray([[1.5, 0], [1.5 - tri, -tri], [1.5 + tri, -tri]])
            ax2.add_patch(Polygon(pts, closed=True, color='black'))

            ax2.set_xlabel(r"Frequency $\times \, \theta$ [Hz $\times$ s]",
                           labelpad=20)
            ax2.set_ylabel(r"Absolute Error", labelpad=20)

            ax2.legend(loc='upper left',
                       frameon=True).get_frame().set_alpha(0.8)

            sns.despine(offset=15)

            savefig(targets[0])
Example 4
def figure_principle3(targets):
    theta = 0.1
    tau = 0.1 * theta
    lmbda = tau
    orders = range(6, 28)

    freqs = np.linspace(0.1 / theta, 16 / theta, 1000)
    s = 2.j * np.pi * freqs

    y = np.exp(-theta * s)
    Hinvs = (tau * s + 1) * np.exp(lmbda * s)
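    # Hinvs is the inverse transfer function of the delayed lowpass synapse
    # h(s) = exp(-lmbda * s) / (tau * s + 1); evaluating a mapped system at
    # these points gives the response it realizes when driving that synapse.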

    cmap_lamb = sns.color_palette("GnBu_d", len(orders))[::-1]
    cmap_ignore = sns.color_palette("OrRd_d", len(orders))[::-1]

    data = np.empty((2, len(DISCRETE_DTS), DISCRETE_SEEDS))

    for seed in range(DISCRETE_SEEDS):
        for i, dt in enumerate(DISCRETE_DTS):
            npfile = np.load(DISCRETE_SIM % (seed, i))
            assert np.allclose(npfile['dt'], dt)
            delay = npfile['delay']
            # t = npfile['t']
            stim = npfile['stim']
            disc = npfile['disc']
            cont = npfile['cont']

            target = ideal_delay(stim, delay, dt)
            e_disc = nrmse(disc, target=target)
            e_cont = nrmse(cont, target=target)

            data[0, i, seed] = e_disc
            data[1, i, seed] = e_cont

    i = np.where(DISCRETE_DTS == 0.001)[0][0]
    assert np.allclose(DISCRETE_DTS[i], 0.001)
    e_disc = np.mean(data, axis=2)[0, i]
    e_cont = np.mean(data, axis=2)[1, i]
    improvement = (e_cont - e_disc) / e_cont * 100
    logging.info("Paper constant: Improvement at 1 ms: %s (%s -> %s)",
                 improvement, e_cont, e_disc)

    with sns.axes_style('ticks'):
        with sns.plotting_context('paper', font_scale=2.8):
            f, (ax1, ax2) = pylab.subplots(1, 2, figsize=(18, 5))

            ax1.set_title("Discrete Lowpass Improvement").set_y(1.05)

            for i, condition, cpal, marker in ((1, 'Principle 3',
                                                sns.color_palette("OrRd_d"),
                                                'X'),
                                               (0, 'Extension',
                                                sns.color_palette("GnBu_d"),
                                                'o')):
                sns.tsplot(data[i].T,
                           1000 * DISCRETE_DTS,
                           condition=condition,
                           color=cpal,
                           marker=marker,
                           markersize=15,
                           lw=3,
                           ci=95,
                           alpha=0.7,
                           ax=ax1)

            ax1.vlines([1.0],
                       np.min(data[0]),
                       2.0,
                       linestyle='--',
                       color='black',
                       lw=4,
                       alpha=0.7,
                       zorder=0)

            ax1.set_xlabel("Discrete Time-step [ms]", labelpad=20)
            ax1.set_ylabel("Absolute Error", labelpad=20)
            ax1.set_xlim(0, 1000 * DISCRETE_DTS[-1] + 0.1)

            ax2.set_title("Delayed Lowpass Improvement").set_y(1.05)

            for i, q in enumerate(orders):
                sys = PadeDelay(theta, order=q)
                mapped = ss2sim(sys, Lowpass(tau), dt=None)
                lambert = lambert_delay(theta, lmbda, tau, q - 1, q)

                y_lamb = lambert(Hinvs)
                y_ignore = mapped(Hinvs)

                ax2.semilogy(freqs * theta,
                             abs(y - y_ignore),
                             lw=2,
                             alpha=0.8,
                             zorder=len(orders) - i,
                             c=cmap_ignore[i])
                ax2.semilogy(freqs * theta,
                             abs(y - y_lamb),
                             lw=2,
                             alpha=0.8,
                             zorder=len(orders) - i,
                             c=cmap_lamb[i])

            lc_ignore = LineCollection(len(orders) * [[(0, 0)]],
                                       lw=10,
                                       colors=cmap_ignore)
            lc_lamb = LineCollection(len(orders) * [[(0, 0)]],
                                     lw=10,
                                     colors=cmap_lamb)
            ax2.legend([lc_ignore, lc_lamb], ['Principle 3', 'Extension'],
                       handlelength=2,
                       handler_map={LineCollection: HandlerDashedLines()})

            ax2.set_xlabel(r"Frequency $\times \, \theta$ [Hz $\times$ s]",
                           labelpad=20)

            sns.despine(offset=15)

            savefig(targets[0])
Example 5
def do_trial(n_neurons=300,
             partition=None,
             freq=1,
             sim_t=10,
             tau=0.2,
             tau_probe=0.2,
             dt=0.001,
             input_seed=0,
             ens_kwargs={
                 'neuron_type':
                 nengo_loihi.neurons.LoihiSpikingRectifiedLinear(),
             }):

    # Discretized Principle 3 mapping of the integrator
    A, B, C, D = ss2sim(~s, nengo.Lowpass(tau), dt=dt).ss
    assert np.allclose(C, 1)
    assert np.allclose(D, 0)
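    # ~s is the integrator 1/s; its state is also its output, so the mapping
    # leaves the readout untouched (C = 1, D = 0) and only adjusts A and B to
    # drive the discrete lowpass synapse.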

    def loihi_factory(model, dt, tau=tau):
        loihi_model = nengo_loihi.builder.Model(dt=dt)
        # https://github.com/nengo/nengo-loihi/issues/97
        assert loihi_model.decode_tau == 0.005
        loihi_model.decode_tau = tau  # used by spike generator
        return nengo_loihi.Simulator(model,
                                     model=loihi_model,
                                     precompute=True,
                                     dt=dt)

    # The integral of test_u is test_x, where test_x is bounded within
    # [-1, 1] and starts at zero.
    process = nengo.processes.WhiteSignal(period=sim_t,
                                          high=freq,
                                          y0=0,
                                          seed=input_seed)
    test_x = process.run(sim_t, dt=dt)
    test_x /= np.max(np.abs(test_x))
    test_u = (test_x - np.roll(test_x, 1)) / dt

    # Roll the signals so that the trial starts where both u and x are close to 0
    w_x = 2  # used for Braindrop experiment
    cost = test_u**2 + w_x * (test_x)**2
    test_u = np.roll(test_u, -np.argmin(cost))
    # don't need to roll test_x because we don't use it anywhere else

    # But due to:
    #   https://github.com/nengo/nengo-loihi/issues/115
    # we need to split the input B*u across multiple spike generators,
    # each normalized to [-1, 1], and furthermore move the synapse
    # to inter_tau. Both changes also benefit accuracy: they alter the
    # noise floor introduced by spike generation and scale with the
    # dynamic range (the number of generators created).
    split = np.ceil(np.max(np.abs(B * test_u))).astype(int)
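    # Each of the `split` generators then receives B * u / split, which the
    # ceiling above guarantees lies within [-1, 1].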

    with nengo.Network() as model:
        u = nengo.Node(output=nengo.processes.PresentInput(test_u, dt))

        if partition is None:
            x = nengo.Ensemble(n_neurons, 1, **ens_kwargs)

            make_input_connection = nengo.Connection

            nengo.Connection(x,
                             x,
                             transform=A,
                             synapse=tau,
                             solver=nengo.solvers.LstsqL2(weights=True))

            p_x = nengo.Probe(x, synapse=tau_probe)

        else:
            if n_neurons % partition != 0:
                raise ValueError(
                    "n_neurons (%s) must be divisible by partition (%s)" %
                    (n_neurons, partition))

            x = VirtualEnsemble(n_ensembles=n_neurons // partition,
                                n_neurons_per_ensemble=partition,
                                dimensions=1,
                                **ens_kwargs)

            def make_input_connection(pre, post, **kwargs):
                return post.add_input(u, **kwargs)

            x.add_input(x.add_output(dt=dt)[0], transform=A, synapse=tau)

            # Copy the output so that the above is collapsed as a passthrough
            p_x = nengo.Probe(x.add_output(dt=dt)[0], synapse=tau_probe)

        for _ in range(split):
            # Use synapse=None here because the spike generator
            # will have a synapse of tau
            make_input_connection(u, x, transform=B / split, synapse=None)

        p_u = nengo.Probe(u, synapse=None)
        p_ideal = nengo.Probe(u, synapse=nengolib.Lowpass(tau_probe) / s)

    with loihi_factory(model, dt) as sim:
        init_generators(sim)
        sim.run(sim_t)

    return {
        't': sim.trange(),
        'u': sim.data[p_u],
        'actual': sim.data[p_x],
        'ideal': sim.data[p_ideal],
    }
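
A minimal usage sketch, assuming nengo_loihi (or its emulator) is installed and that init_generators and VirtualEnsemble are importable from the surrounding project; nrmse refers to the same error metric used in Examples 3 and 4 above:

res = do_trial(n_neurons=300, partition=None)
# Compare the probed integrator state against the ideal (filtered) integral.
error = nrmse(res['actual'], target=res['ideal'])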