Example #1
# Assumed imports, not shown in the original snippets: `Alpha`, `rms`, and
# `norm` match the nengo API (a guess based on the calls used below).
import numpy as np
import matplotlib.pyplot as plt
from nengo.synapses import Alpha
from nengo.utils.numpy import norm, rms


def error_plot(dt, prestime, t, ystar, learners, tmin=None, tmax=None, ax=None):
    """Plot each learner's smoothed, normalized RMS error over time."""
    ax = plt.gca() if ax is None else ax
    es = [learner['e'] for learner in learners]

    if tmin is not None or tmax is not None:
        tmin = t[0] if tmin is None else tmin
        tmax = t[-1] if tmax is None else tmax
        tmask = (t >= tmin) & (t <= tmax)
        t = t[tmask]
        es = [e[tmask] for e in es]

    # Fast synapse to smooth signals; slower synapse (a multiple of the
    # presentation time) to smooth the error trace.
    vsynapse = Alpha(0.01, default_dt=dt)
    esynapse = Alpha(5*prestime, default_dt=dt)

    # Normalize errors by the RMS of the (smoothed) target signal.
    yrms = rms(vsynapse.filtfilt(ystar), axis=1).mean()
    for e in es:
        erms = esynapse.filtfilt(rms(vsynapse.filtfilt(e), axis=1) / yrms)
        ax.plot(t, erms)
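
# A minimal usage sketch for error_plot. All data here is synthetic, and the
# shapes are assumptions inferred from the indexing above (t is (n,), ystar
# and each learner's 'e' are (n, d)):
dt, prestime = 0.001, 0.2
t = dt * np.arange(10000)
ystar = np.stack([np.sin(2*np.pi*t), np.cos(2*np.pi*t)], axis=1)
learners = [{'e': 0.1 * np.random.randn(len(t), 2)}]
error_plot(dt, prestime, t, ystar, learners)
plt.show()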
Example #2
def output1_plot(dt, t, ystar, learner, tmin=None, tmax=None, ax=None):
    """Plot one learner's (smoothed) output against the target signal."""
    ax = plt.gca() if ax is None else ax
    y = learner['y']

    if tmin is not None or tmax is not None:
        tmin = t[0] if tmin is None else tmin
        tmax = t[-1] if tmax is None else tmax
        tmask = (t >= tmin) & (t <= tmax)
        t = t[tmask]
        ystar = ystar[tmask]
        y = y[tmask]

    # Plot only the first two dimensions.
    dinds = list(range(2))

    vsynapse = Alpha(0.01, default_dt=dt)
    ystar = ystar[:, dinds]
    y = vsynapse.filtfilt(y[:, dinds])

    ax.plot(t, y)
    ax.set_prop_cycle(None)  # reset colors so each target matches its output
    ax.plot(t, ystar, ':')

    ax.legend(['dim %d' % (i+1) for i in range(len(dinds))], loc='best')

    ax.set_xlim((tmin, tmax))
    ax.set_ylim((-1.5, 1.5))

    # Show plain tick labels (no offset or scientific notation) on the x axis.
    ax.get_xaxis().get_major_formatter().set_useOffset(False)
    ax.get_xaxis().get_major_formatter().set_scientific(False)
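
# Hedged usage sketch for output1_plot; the data and the (n, d) shape of the
# learner's 'y' entry are assumptions inferred from the function body:
dt = 0.001
t = dt * np.arange(5000)
ystar = np.stack([np.sin(2*np.pi*t), np.cos(2*np.pi*t)], axis=1)
learner = {'y': ystar + 0.2 * np.random.randn(*ystar.shape)}
output1_plot(dt, t, ystar, learner, tmin=1.0, tmax=3.0)
plt.show()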
Example #3
def output_plot(dt, t, ystar, learners, tmin=None, tmax=None, ax=None):
    """Overlay every learner's (smoothed) output on the target signal."""
    ax = plt.gca() if ax is None else ax
    ys = [learner['y'] for learner in learners]

    if tmin is not None or tmax is not None:
        tmin = t[0] if tmin is None else tmin
        tmax = t[-1] if tmax is None else tmax
        tmask = (t >= tmin) & (t <= tmax)
        t = t[tmask]
        ystar = ystar[tmask]
        ys = [y[tmask] for y in ys]

    # Plot the first two dimensions, distinguished by line style.
    dinds = list(range(2))
    dstyles = ['-', ':']

    vsynapse = Alpha(0.01, default_dt=dt)
    ystar = ystar[:, dinds]
    ys = [vsynapse.filtfilt(y[:, dinds]) for y in ys]
    for k, dstyle in enumerate(dstyles):
        ax.set_prop_cycle(None)  # keep learner colors consistent across dims
        ax.plot(t, ystar[:, k], 'k', linestyle=dstyle)
        for y in ys:
            ax.plot(t, y[:, k], linestyle=dstyle)

    ax.set_ylim((-1.5, 1.5))
Example #4
def error_layers_plots(dt, t, learners):
    """For learners with per-layer errors ('els'), plot them with the output error."""
    vsynapse = Alpha(0.01, default_dt=dt)

    for learner in [l for l in learners if 'els' in l]:
        plt.figure()
        plt.subplot(211)
        dind = 0

        e = vsynapse.filtfilt(learner['e'])
        els = [vsynapse.filtfilt(el) for el in learner['els']]
        plt.plot(t, e[:, dind])
        for el in els:
            plt.plot(t, el[:, dind])

        plt.subplot(212)
        plt.plot(t, norm(e, axis=1))
        for el in els:
            plt.plot(t, norm(el, axis=1))

    plt.show()
Example #5
def trials_error_plot(prestime, t, ystar, learners):
    """Plot outputs (top) and smoothed error norms (bottom) for all learners."""
    pdt = 0.01  # timestep assumed for the recorded trial data
    vsynapse = Alpha(0.02, default_dt=pdt)

    plt.figure()
    dinds = slice(0, 2)

    plt.subplot(211)
    plt.plot(t, ystar[:, dinds])
    for learner in learners:
        y = vsynapse.filtfilt(learner['y'][:, dinds])
        plt.plot(t, y)
    plt.ylabel('outputs')

    plt.subplot(212)
    esynapse = Alpha(5 * prestime, default_dt=pdt)
    for learner in learners:
        e = norm(esynapse.filtfilt(learner['e']), axis=1)
        plt.plot(t, e)
    plt.ylabel('errors')
Example #6
def cosyne_plot(dt, prestime, t, ystar, learners, n_test_pre, n_train,
                offline_data=None):
    vsynapse = Alpha(0.01, default_dt=dt)

    learner = learners[-1]

    n_show_pre = 10
    n_show_post = 10
    assert n_show_pre <= n_test_pre

    # Training runs from t0 to t1; show a window at the very start
    # (pre-learning) and one right after training ends (post-learning).
    t0 = n_test_pre*prestime
    t1 = t0 + n_train*prestime

    tpre0 = 0
    tpre1 = tpre0 + n_show_pre*prestime
    tpost0 = t1
    tpost1 = tpost0 + n_show_post*prestime

    plt.figure(figsize=(6.4, 7))

    subplot_shape = (3, 3)

    ax = plt.subplot2grid(subplot_shape, (0, 2))
    output1_plot(dt, t, ystar, learner, tmin=tpre0, tmax=tpre1, ax=ax)
    plt.title('Pre-learning output')

    ax = plt.subplot2grid(subplot_shape, (1, 2))
    output1_plot(dt, t, ystar, learner, tmin=tpost0, tmax=tpost1, ax=ax)
    plt.xlabel('simulation time [s]')
    plt.title('Post-learning output')

    # Spikes from the first 40 neurons of the first and last hidden layers.
    hid_spikes0 = learner['hs'][0][:, :40]
    hid_spikes1 = learner['hs'][-1][:, :40]

    ax = plt.subplot2grid(subplot_shape, (0, 0))
    spike_plot(dt, t, hid_spikes0, tmin=tpre0, tmax=tpre1, ax=ax)
    plt.title('Pre-learning spikes 1')

    ax = plt.subplot2grid(subplot_shape, (1, 0))
    spike_plot(dt, t, hid_spikes0, tmin=tpost0, tmax=tpost1, ax=ax)
    plt.xlabel('simulation time [s]')
    plt.title('Post-learning spikes 1')

    ax = plt.subplot2grid(subplot_shape, (0, 1))
    spike_plot(dt, t, hid_spikes1, tmin=tpre0, tmax=tpre1, ax=ax)
    plt.title('Pre-learning spikes 2')

    ax = plt.subplot2grid(subplot_shape, (1, 1))
    spike_plot(dt, t, hid_spikes1, tmin=tpost0, tmax=tpost1, ax=ax)
    plt.xlabel('simulation time [s]')
    plt.title('Post-learning spikes 2')

    ax = plt.subplot2grid(subplot_shape, (2, 0), colspan=3)
    error_plot(dt, prestime, t, ystar, [learner], tmin=t0, tmax=t1, ax=ax)
    if offline_data is not None:
        # Overlay the error of the matching non-spiking (offline) learner,
        # normalized and smoothed the same way as the spiking error.
        Yrms = rms(offline_data['Y'], axis=1).mean()
        eo = offline_data['learners'][-1]['batch_errors'] / Yrms
        dto = prestime*offline_data['n_per_batch']
        to = t0 + dto*np.arange(len(eo))

        tmask = (to >= t0) & (to <= t1)
        to = to[tmask]
        eo = eo[tmask]

        esynapse = Alpha(5*prestime, default_dt=dto)
        eo = esynapse.filtfilt(eo)

        ax.plot(to, eo, 'k:')
        ax.set_xlim((t0, t1))
        ax.legend(['spiking', 'non-spiking'], loc='best')

    plt.xlabel('simulation time [s]')
    plt.ylabel('normalized RMS error')
    plt.title('Error')

    plt.tight_layout()
Example #7
def plot_batches(x, label=None):
    # n_per_batch (examples per batch) is taken from the enclosing scope.
    filt = Alpha(200, default_dt=n_per_batch)
    y = filt.filtfilt(x) if len(x) > 0 else []
    batch_inds = n_per_batch * np.arange(len(x))
    plt.semilogy(batch_inds, y, label=label)
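
# Hedged usage sketch: plot_batches reads n_per_batch from the enclosing
# scope, so a self-contained call defines it first (all values assumed):
n_per_batch = 100
batch_errors = np.exp(-np.linspace(0, 5, 2000)) + 0.01 * np.random.rand(2000)
plot_batches(batch_errors, label='train error')
plt.legend(loc='best')
plt.show()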
Example #8
# --- plot results (rows=[train, test], cols=learners)
rows = 2
cols = len(results[0])

plt.figure(figsize=(7, 6))

# Smooth batch errors with a wide alpha filter before plotting.
filt = Alpha(10000, default_dt=n_per_batch)
for col in range(cols):
    ax = plt.subplot(rows, cols, col + 1)

    for i, label in enumerate(f_df_labels):
        learner = results[i][col]
        x = learner.batch_errors
        y = filt.filtfilt(x) if len(x) > 0 else []
        batch_inds = n_per_batch * np.arange(len(x))
        ax.semilogy(batch_inds, y, label=label)

    plt.ylim([5e-3, 2e-1])
    ax.set_xticklabels([])
    if col == 0:
        plt.ylabel('train error')
    plt.title(learner.name)

filt = Alpha(1, default_dt=1)  # second row: per-epoch curves (dt = 1 epoch)
for col in range(cols):
    plt.subplot(rows, cols, cols + col + 1)
Example #9
        learner.train(epochs, batch_fn, test_set=test_set)

    for learner in learners:
        print(", ".join("||W%d|| = %0.3f" % (i, norm(w))
                        for i, w in enumerate(learner.network.weights)))

# --- plots
fig = plt.figure()
rows, cols = 4, 1

filt = Alpha(100, default_dt=n_per_batch)

ax = fig.add_subplot(rows, cols, 1)
ax.set_yscale('log')
for learner in learners:
    ax.plot(filt.filtfilt(learner.batch_errors), label=learner.name)

ax = fig.add_subplot(rows, cols, 2)
ax.set_yscale('log')
for learner in learners:
    if learner.delta_norms is not None:
        ax.plot(filt.filtfilt(learner.delta_norms), label=learner.name)

ax = fig.add_subplot(rows, cols, 3)
for learner in learners:
    if getattr(learner, 'bp_angles', None) is not None:
        angles = np.array(learner.bp_angles)  # angles in radians
        ax.plot(filt.filtfilt(angles) * (180 / np.pi), label=learner.name)
ax.set_ylim([0, 90])

ax = fig.add_subplot(rows, cols, 4)
Example #10
rows = 2
cols = 2
fig = plt.figure()  # assumed: `fig` is used below but its creation was elided

n_batches = len(learners[0].batch_errors)
batch_inds = (n_per_batch / 1000.) * np.arange(n_batches)
epoch_inds = (trainX.shape[0] / 1000.) * np.arange(1, epochs+1)

# - train subplot
ax = fig.add_subplot(rows, cols, 1)

filt = Alpha(10000, default_dt=n_per_batch)

for learner in learners:
    y = filt.filtfilt(learner.batch_errors)
    ax.semilogy(batch_inds, y, label=learner.name)

plt.ylim([None, 2e-1])
plt.xlabel('thousands of examples')
plt.ylabel('train error')
plt.legend(loc='best')

# - test subplot
ax = fig.add_subplot(rows, cols, 2)

filt = Alpha(0.5, default_dt=1)
Example #11
def plot_batches(x, label=None, color=None):
    # n_per_batch and Yrms (target RMS) are taken from the enclosing scope.
    filt = Alpha(10, default_dt=n_per_batch)
    y = filt.filtfilt(x) / Yrms  # normalized error
    batch_inds = n_per_batch * np.arange(len(x))
    plt.plot(batch_inds, y, label=label, color=color)
Example #12
fig = plt.figure()
rows, cols = 5, 1
layer_styles = ('-', '--', ':')

filt = Alpha(100, default_dt=n_per_batch)

ax = fig.add_subplot(rows, cols, 1)
ax.set_yscale('log')
batch_errors = np.array([learner.batch_errors for learner in learners])
ax.plot(batch_inds, filt.filtfilt(batch_errors, axis=1).T)

ax = fig.add_subplot(rows, cols, 2)
ax.set_yscale('log')
delta_norms = np.array([
    x.delta_norms if x.delta_norms is not None
    else np.nan * np.ones((n_batches, n_hids))
    for x in learners
])
delta_norms = filt.filtfilt(delta_norms, axis=1)
for i in range(n_hids):
    ax.set_prop_cycle(None)  # keep learner colors consistent across layers
    ax.plot(batch_inds, delta_norms[..., i].T, linestyle=layer_styles[i])

ax = fig.add_subplot(rows, cols, 3)
bp_angles = (180 / np.pi) * np.array([
    getattr(learner, 'bp_angles', np.nan * np.ones((n_batches, n_hids)))
    for learner in learners
])
Example #13
n_fdfs = batch_errors.shape[1]
n_learners = batch_errors.shape[2]
assert n_learners == len(learner_names)

rows = 2
cols = n_learners

plt.figure(figsize=(7, 6))

filt = Alpha(3000, default_dt=n_per_batch)
# filt = Alpha(10000, default_dt=n_per_batch)
for col in range(cols):
    ax = plt.subplot(rows, cols, col + 1)

    error = filt.filtfilt(batch_errors[:, :, col, :], axis=-1)
    batch_inds = (n_per_batch / 1000.) * np.arange(error.shape[-1])
    error, batch_inds = error[..., ::10], batch_inds[::10]  # downsample for plotting

    sns.tsplot(
        data=np.transpose(error, (0, 2, 1)),
        time=batch_inds,
        condition=f_df_labels,
        legend=(col == 0))

    ax.set_ylim([5e-3, 2e-1])
    ax.set_yscale('log')
    ax.set_xticklabels([])
    if col == 0: