def run(n_neurons=10000, neuron_type=LIF(), t_train=200, t=200, f=DoubleExp(1e-3, 1e-1), dt=0.001, dt_sample=0.003,
        tt=1.0, seed=0, smooth=30, reg=1e-1, penalty=0, df_evals=20, load_fd=False):

    d_ens = np.zeros((n_neurons, 3))
    f_ens = f

    if load_fd:
        load = np.load(load_fd)
        d_ens = load['d_ens']
        taus_ens = load['taus_ens']
        f_ens = DoubleExp(taus_ens[0], taus_ens[1])
        d_ens_gauss = load['d_ens_gauss']
    else:
        print('Optimizing ens filters and decoders')
        data = go(d_ens, f_ens, neuron_type=neuron_type, n_neurons=n_neurons, L=True, t=t_train, f=f,
            dt=dt, dt_sample=dt_sample, seed=seed)
        d_ens, f_ens, taus_ens = df_opt(data['x'], data['ens'], f, df_evals=df_evals, reg=reg, penalty=penalty,
            dt=dt_sample, dt_sample=dt_sample, name='lorenz_%s'%neuron_type)
        all_targets_gauss = gaussian_filter1d(data['x'], sigma=smooth, axis=0)
        all_spikes_gauss = gaussian_filter1d(data['ens'], sigma=smooth, axis=0)
        d_ens_gauss = nengo.solvers.LstsqL2(reg=reg)(all_spikes_gauss, all_targets_gauss)[0]
        np.savez('data/lorenz_%s_fd.npz'%neuron_type, d_ens=d_ens, taus_ens=taus_ens, d_ens_gauss=d_ens_gauss)

        f_times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(f_times, f.impulse(len(f_times), dt=0.0001),
            label=r"$f^x, \tau_1=%.3f, \tau_2=%.3f$" %(-1./f.poles[0], -1./f.poles[1]))
        ax.plot(f_times, f_ens.impulse(len(f_times), dt=0.0001),
            label=r"$f^{ens}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
            %(-1./f_ens.poles[0], -1./f_ens.poles[1], np.count_nonzero(d_ens), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/lorenz_%s_filters_ens.pdf"%neuron_type)

        tar = f.filt(data['x'], dt=dt_sample)
        a_ens = f_ens.filt(data['ens'], dt=dt_sample)
        ens = np.dot(a_ens, d_ens)
        z_tar_peaks, _ = find_peaks(tar[:,2], height=0)  # gives time indices of z-component-peaks
        z_ens_peaks, _ = find_peaks(ens[:,2], height=0)

        fig = plt.figure()
        ax = fig.add_subplot(121, projection='3d')
        ax2 = fig.add_subplot(122, projection='3d')
        ax.plot(*tar.T, linewidth=0.25)
        # ax.scatter(*tar[z_tar_peaks].T, color='r', s=1)
        ax2.plot(*ens.T, linewidth=0.25)
        # ax2.scatter(*ens[z_ens_peaks].T, color='r', s=1, marker='v')
        ax.set(xlabel="x", ylabel="y", zlabel="z", xlim=((-20, 20)), ylim=((-10, 30)), zlim=((0, 40)))
        ax.xaxis.pane.fill = False
        ax.yaxis.pane.fill = False
        ax.zaxis.pane.fill = False
        ax.xaxis.pane.set_edgecolor('w')
        ax.yaxis.pane.set_edgecolor('w')
        ax.zaxis.pane.set_edgecolor('w')
        ax.grid(False)
        ax2.set(xlabel="x", ylabel="y", zlabel="z", xlim=((-20, 20)), ylim=((-10, 30)), zlim=((0, 40)))
        ax2.xaxis.pane.fill = False
        ax2.yaxis.pane.fill = False
        ax2.zaxis.pane.fill = False
        ax2.xaxis.pane.set_edgecolor('w')
        ax2.yaxis.pane.set_edgecolor('w')
        ax2.zaxis.pane.set_edgecolor('w')
        ax2.grid(False)
        plt.savefig("plots/lorenz_%s_train_3D.pdf"%neuron_type)

        fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
        ax1.plot(tar[:,0], tar[:,1], linestyle="--", linewidth=0.25)
        ax2.plot(tar[:,1], tar[:,2], linestyle="--", linewidth=0.25)
        ax3.plot(tar[:,0], tar[:,2], linestyle="--", linewidth=0.25)
        # ax2.scatter(tar[z_tar_peaks, 1], tar[z_tar_peaks, 2], s=3, color='r')
        # ax3.scatter(tar[z_tar_peaks, 0], tar[z_tar_peaks, 2], s=3, color='g')
        ax1.plot(ens[:,0], ens[:,1], linewidth=0.25)
        ax2.plot(ens[:,1], ens[:,2], linewidth=0.25)
        ax3.plot(ens[:,0], ens[:,2], linewidth=0.25)
        # ax2.scatter(ens[z_ens_peaks, 1], ens[z_ens_peaks, 2], s=3, color='r', marker='v')
        # ax3.scatter(ens[z_ens_peaks, 0], ens[z_ens_peaks, 2], s=3, color='g', marker='v')
        ax1.set(xlabel='x', ylabel='y')
        ax2.set(xlabel='y', ylabel='z')
        ax3.set(xlabel='x', ylabel='z')
        plt.tight_layout()
        plt.savefig("plots/lorenz_%s_train_pairwise.pdf"%neuron_type)
        plt.close('all')

        # Plot tent map and fit the data to a gaussian
        print('Plotting tent map')
        trans = int(tt/dt)
        tar_gauss = gaussian_filter1d(data['x'][trans:], sigma=smooth, axis=0)
        a_ens_gauss = gaussian_filter1d(data['ens'][trans:], sigma=smooth, axis=0)
        ens_gauss = np.dot(a_ens_gauss, d_ens_gauss)
        z_tar_peaks = find_peaks(tar_gauss[:,2], height=0)[0][1:]
        z_tar_values_horz = np.ravel(tar_gauss[z_tar_peaks, 2][:-1])
        z_tar_values_vert = np.ravel(tar_gauss[z_tar_peaks, 2][1:])
        z_ens_peaks = find_peaks(ens_gauss[:,2], height=0)[0][1:]
        z_ens_values_horz = np.ravel(ens_gauss[z_ens_peaks, 2][:-1])
        z_ens_values_vert = np.ravel(ens_gauss[z_ens_peaks, 2][1:])
        # def gaussian(x, mu, sigma, mag):
        #     return mag * np.exp(-0.5*(np.square((x-mu)/sigma)))
        # p0 = [36, 2, 40]
        # param_ens, _ = curve_fit(gaussian, z_ens_values_horz, z_ens_values_vert, p0=p0)
        # param_tar, _ = curve_fit(gaussian, z_tar_values_horz, z_tar_values_vert, p0=p0)
        # horzs_tar = np.linspace(np.min(z_tar_values_horz), np.max(z_tar_values_horz), 100)
        # gauss_tar = gaussian(horzs_tar, param_tar[0], param_tar[1], param_tar[2])
        # horzs_ens = np.linspace(np.min(z_ens_values_horz), np.max(z_ens_values_horz), 100)
        # gauss_ens = gaussian(horzs_ens, param_ens[0], param_ens[1], param_ens[2])
        # error = entropy(gauss_ens, gauss_tar)
        fig, ax = plt.subplots()
        ax.scatter(z_tar_values_horz, z_tar_values_vert, alpha=0.5, color='r', label='target')
        # ax.plot(horzs_tar, gauss_tar, color='r', linestyle='--', label='target fit')
        ax.scatter(z_ens_values_horz, z_ens_values_vert, alpha=0.5, color='b', label='ens')
        # ax.plot(horzs_ens, gauss_ens, color='b', linestyle='--', label='ens fit')
        ax.set(xlabel=r'$\mathrm{max}_n (z)$', ylabel=r'$\mathrm{max}_{n+1} (z)$')  # , title='error=%.5f'%error)
        plt.legend(loc='upper right')
        plt.savefig("plots/lorenz_%s_train_tent.pdf"%(neuron_type))

    print("testing")
    data = go(d_ens, f_ens, neuron_type=neuron_type, n_neurons=n_neurons, L=False, t=t, f=f,
        dt=dt, dt_sample=dt_sample, seed=seed)

    tar = f.filt(data['x'], dt=dt_sample)
    a_ens = f_ens.filt(data['ens'], dt=dt_sample)
    ens = np.dot(a_ens, d_ens)
    z_tar_peaks, _ = find_peaks(tar[:,2], height=0)  # gives time indices of z-component-peaks
    z_ens_peaks, _ = find_peaks(ens[:,2], height=0)

    fig = plt.figure()
    ax = fig.add_subplot(121, projection='3d')
    ax2 = fig.add_subplot(122, projection='3d')
    ax.plot(*tar.T, linewidth=0.25)
    # ax.scatter(*tar[z_tar_peaks].T, color='r', s=1)
    ax2.plot(*ens.T, linewidth=0.25)
    # ax2.scatter(*ens[z_ens_peaks].T, color='r', s=1, marker='v')
    ax.set(xlabel="x", ylabel="y", zlabel="z", xlim=((-20, 20)), ylim=((-10, 30)), zlim=((0, 40)))
    ax.xaxis.pane.fill = False
    ax.yaxis.pane.fill = False
    ax.zaxis.pane.fill = False
    ax.xaxis.pane.set_edgecolor('w')
    ax.yaxis.pane.set_edgecolor('w')
    ax.zaxis.pane.set_edgecolor('w')
    ax.grid(False)
    ax2.set(xlabel="x", ylabel="y", zlabel="z", xlim=((-20, 20)), ylim=((-10, 30)), zlim=((0, 40)))
    ax2.xaxis.pane.fill = False
    ax2.yaxis.pane.fill = False
    ax2.zaxis.pane.fill = False
    ax2.xaxis.pane.set_edgecolor('w')
    ax2.yaxis.pane.set_edgecolor('w')
    ax2.zaxis.pane.set_edgecolor('w')
    ax2.grid(False)
    plt.savefig("plots/lorenz_%s_test_3D.pdf"%neuron_type)

    fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
    ax1.plot(tar[:,0], tar[:,1], linestyle="--", linewidth=0.25)
    ax2.plot(tar[:,1], tar[:,2], linestyle="--", linewidth=0.25)
    ax3.plot(tar[:,0], tar[:,2], linestyle="--", linewidth=0.25)
    # ax2.scatter(tar[z_tar_peaks, 1], tar[z_tar_peaks, 2], s=3, color='r')
    # ax3.scatter(tar[z_tar_peaks, 0], tar[z_tar_peaks, 2], s=3, color='g')
    ax1.plot(ens[:,0], ens[:,1], linewidth=0.25)
    ax2.plot(ens[:,1], ens[:,2], linewidth=0.25)
    ax3.plot(ens[:,0], ens[:,2], linewidth=0.25)
    # ax2.scatter(ens[z_ens_peaks, 1], ens[z_ens_peaks, 2], s=3, color='r', marker='v')
    # ax3.scatter(ens[z_ens_peaks, 0], ens[z_ens_peaks, 2], s=3, color='g', marker='v')
    ax1.set(xlabel='x', ylabel='y')
    ax2.set(xlabel='y', ylabel='z')
    ax3.set(xlabel='x', ylabel='z')
    plt.tight_layout()
    plt.savefig("plots/lorenz_%s_test_pairwise.pdf"%neuron_type)
    plt.close('all')

    # Plot tent map and fit the data to a gaussian
    print('Plotting tent map')
    trans = int(tt/dt)
    tar_gauss = gaussian_filter1d(data['x'][trans:], sigma=smooth, axis=0)
    a_ens_gauss = gaussian_filter1d(data['ens'][trans:], sigma=smooth, axis=0)
    ens_gauss = np.dot(a_ens_gauss, d_ens_gauss)
    z_tar_peaks = find_peaks(tar_gauss[:,2], height=0)[0][1:]
    z_tar_values_horz = np.ravel(tar_gauss[z_tar_peaks, 2][:-1])
    z_tar_values_vert = np.ravel(tar_gauss[z_tar_peaks, 2][1:])
    z_ens_peaks = find_peaks(ens_gauss[:,2], height=0)[0][1:]
    z_ens_values_horz = np.ravel(ens_gauss[z_ens_peaks, 2][:-1])
    z_ens_values_vert = np.ravel(ens_gauss[z_ens_peaks, 2][1:])
    # def gaussian(x, mu, sigma, mag):
    #     return mag * np.exp(-0.5*(np.square((x-mu)/sigma)))
    # p0 = [36, 2, 40]
    # param_ens, _ = curve_fit(gaussian, z_ens_values_horz, z_ens_values_vert, p0=p0)
    # param_tar, _ = curve_fit(gaussian, z_tar_values_horz, z_tar_values_vert, p0=p0)
    # horzs_tar = np.linspace(np.min(z_tar_values_horz), np.max(z_tar_values_horz), 100)
    # gauss_tar = gaussian(horzs_tar, param_tar[0], param_tar[1], param_tar[2])
    # horzs_ens = np.linspace(np.min(z_ens_values_horz), np.max(z_ens_values_horz), 100)
    # gauss_ens = gaussian(horzs_ens, param_ens[0], param_ens[1], param_ens[2])
    # error = entropy(gauss_ens, gauss_tar)
    fig, ax = plt.subplots()
    ax.scatter(z_tar_values_horz, z_tar_values_vert, alpha=0.5, color='r', label='target')
    # ax.plot(horzs_tar, gauss_tar, color='r', linestyle='--', label='target fit')
    ax.scatter(z_ens_values_horz, z_ens_values_vert, alpha=0.5, color='b', label='ens')
    # ax.plot(horzs_ens, gauss_ens, color='b', linestyle='--', label='ens fit')
    ax.set(xlabel=r'$\mathrm{max}_n (z)$', ylabel=r'$\mathrm{max}_{n+1} (z)$')  # , title='error=%.5f'%error)
    plt.legend(loc='upper right')
    plt.savefig("plots/lorenz_%s_test_tent.pdf"%(neuron_type))
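
# Hypothetical entry point (an assumption, not part of the original file):
# retrain the Lorenz filters/decoders from scratch, or pass a previously saved
# archive (e.g. load_fd='data/lorenz_LIF()_fd.npz') to skip the training stage.
# if __name__ == '__main__':
#     run(n_neurons=10000, neuron_type=LIF())
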
def run(n_neurons=30, t=30, t_test=10, n_encodes=20, dt=0.001, n_tests=10, neuron_type=LIF(),
        f=DoubleExp(1e-3, 3e-2), f_out=DoubleExp(1e-3, 1e-1), reg=0, penalty=1.0, load_w=None, load_df=None):

    d_ens = np.zeros((n_neurons, 1))
    f_ens = f
    f_smooth = DoubleExp(1e-2, 2e-1)
    w_ens = None
    e_ens = None
    w_ens2 = None
    e_ens2 = None
    print('\nNeuron Type: %s' % neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens = np.load(load_w)['w_ens']
        else:
            print('Optimizing ens1 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s" % nenc)
                stim_func = make_normed_flipped(value=1.2, t=t, dt=dt, f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, dt=0.001, stim_func=stim_func,
                    neuron_type=neuron_type, w_ens=w_ens, e_ens=e_ens, L=True)
                e_ens = data['e_ens']
                w_ens = data['w_ens']
                np.savez('data/identity_w.npz', e_ens=e_ens, w_ens=w_ens)

                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens), ax=ax, kde=False)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/identity_%s_w_ens_nenc_%s.pdf" % (neuron_type, nenc))

                a_ens = f_smooth.filt(data['ens'], dt=dt)
                a_supv = f_smooth.filt(data['supv'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv')
                    ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/identity_ens_nenc_%s_activity_%s.pdf' % (nenc, n))
                plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_ens = load['d_ens']
        d_out1 = load['d_out1']
        taus_ens = load['taus_ens']
        taus_out1 = load['taus_out1']
        f_ens = DoubleExp(taus_ens[0], taus_ens[1])
        f_out1 = DoubleExp(taus_out1[0], taus_out1[1])
    else:
        print('Optimizing ens1 filters and decoders')
        stim_func = make_normed_flipped(value=1.0, t=t, dt=dt, f=f)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, dt=dt, neuron_type=neuron_type,
            stim_func=stim_func, w_ens=w_ens)
        d_ens, f_ens, taus_ens = df_opt(data['x'], data['ens'], f, dt=dt, reg=reg, penalty=penalty,
            name='identity_%s' % neuron_type)
        d_out1, f_out1, taus_out1 = df_opt(data['x'], data['ens'], f_out, dt=dt, reg=0, penalty=0,
            name='identity_%s' % neuron_type)
        np.savez('data/identity_%s_df.npz' % neuron_type,
            d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f.impulse(len(times), dt=0.0001),
            label=r"$f^x, \tau_1=%.3f, \tau_2=%.3f$" % (-1. / f.poles[0], -1. / f.poles[1]))
        ax.plot(times, f_ens.impulse(len(times), dt=0.0001),
            label=r"$f^{ens}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
            % (-1. / f_ens.poles[0], -1. / f_ens.poles[1], np.count_nonzero(d_ens), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/identity_%s_filters_ens.pdf" % neuron_type)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001),
            label=r"$f^{out}, \tau_1=%.3f, \tau_2=%.3f$" % (-1. / f_out.poles[0], -1. / f_out.poles[1]))
        ax.plot(times, f_out1.impulse(len(times), dt=0.0001),
            label=r"$f^{out1}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
            % (-1. / f_out1.poles[0], -1. / f_out1.poles[1], np.count_nonzero(d_out1), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/identity_%s_filters_out1.pdf" % neuron_type)

        a_ens = f_out1.filt(data['ens'], dt=dt)
        x = f_out.filt(data['x'], dt=dt)
        xhat_ens = np.dot(a_ens, d_out1)
        rmse_ens = rmse(xhat_ens, x)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' % rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/identity_%s_ens1_train.pdf" % neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens2 = np.load(load_w)['w_ens2']
        else:
            print('Optimizing ens2 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s" % nenc)
                stim_func = make_normed_flipped(value=1.2, t=t, dt=dt, f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, f_smooth=f_smooth,
                    neuron_type=neuron_type, stim_func=stim_func, w_ens=w_ens, w_ens2=w_ens2, e_ens2=e_ens2, L2=True)
                w_ens2 = data['w_ens2']
                e_ens2 = data['e_ens2']

                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens2), ax=ax)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/identity_%s_w_ens2_nenc_%s.pdf" % (neuron_type, nenc))

                np.savez('data/identity_w.npz', w_ens=w_ens, w_ens2=w_ens2, e_ens=e_ens, e_ens2=e_ens2)

                a_ens = f_smooth.filt(data['ens2'], dt=dt)
                a_supv = f_smooth.filt(data['supv2'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv2')
                    ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens2')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/identity_ens2_nenc_%s_activity_%s.pdf' % (nenc, n))
                plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_out2 = load['d_out2']
        taus_out2 = load['taus_out2']
        f_out2 = DoubleExp(taus_out2[0], taus_out2[1])
    else:
        print('Optimizing ens2 filters and decoders')
        stim_func = make_normed_flipped(value=1.0, t=t, dt=dt, f=f)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, dt=dt, neuron_type=neuron_type,
            stim_func=stim_func, w_ens=w_ens, w_ens2=w_ens2)
        d_out2, f_out2, taus_out2 = df_opt(data['x2'], data['ens2'], f_out, dt=dt, reg=0, penalty=0,
            name='identity_%s' % neuron_type)
        np.savez('data/identity_%s_df.npz' % neuron_type,
            d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1, d_out2=d_out2, taus_out2=taus_out2)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001),
            label=r"$f^{out}, \tau_1=%.3f, \tau_2=%.3f$" % (-1. / f_out.poles[0], -1. / f_out.poles[1]))
        ax.plot(times, f_out2.impulse(len(times), dt=0.0001),
            label=r"$f^{out2}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
            % (-1. / f_out2.poles[0], -1. / f_out2.poles[1], np.count_nonzero(d_out2), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/identity_%s_filters_out2.pdf" % neuron_type)

        fig, ax = plt.subplots()
        sns.distplot(np.ravel(d_out2))
        ax.set(xlabel='decoders', ylabel='frequency')
        plt.savefig("plots/identity_%s_d_out2.pdf" % neuron_type)

        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' % rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/identity_%s_ens2_train.pdf" % neuron_type)

    rmses_ens = np.zeros((n_tests))
    rmses_ens2 = np.zeros((n_tests))
    for test in range(n_tests):
        print('test %s' % test)
        stim_func = make_normed_flipped(value=1.0, t=t_test, dt=dt, f=f, seed=100 + test)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_test, f=f, dt=dt, neuron_type=neuron_type,
            stim_func=stim_func, w_ens=w_ens, w_ens2=w_ens2)

        a_ens = f_out1.filt(data['ens'], dt=dt)
        x = f_out.filt(data['x'], dt=dt)
        xhat_ens = np.dot(a_ens, d_out1)  # decode with d_out1, the readout decoders paired with f_out1 above
        rmse_ens = rmse(xhat_ens, x)
        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        rmses_ens[test] = rmse_ens
        rmses_ens2[test] = rmse_ens2

        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' % rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/identity_%s_ens1_test_%s.pdf" % (neuron_type, test))

        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' % rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/identity_%s_ens2_test_%s.pdf" % (neuron_type, test))
        plt.close('all')

    mean_ens = np.mean(rmses_ens)
    mean_ens2 = np.mean(rmses_ens2)
    CI_ens = sns.utils.ci(rmses_ens)
    CI_ens2 = sns.utils.ci(rmses_ens2)

    fig, ax = plt.subplots()
    sns.barplot(data=rmses_ens2)
    ax.set(ylabel='RMSE', title="mean=%.3f, CI=%.3f-%.3f" % (mean_ens2, CI_ens2[0], CI_ens2[1]))
    plt.xticks()
    plt.savefig("plots/identity_%s_rmse.pdf" % neuron_type)

    print('rmses: ', rmses_ens, rmses_ens2)
    print('means: ', mean_ens, mean_ens2)
    print('confidence intervals: ', CI_ens, CI_ens2)
    np.savez('data/identity_%s_results.npz' % neuron_type, rmses_ens=rmses_ens, rmses_ens2=rmses_ens2)
    return rmses_ens2
def run(n_neurons=100, t=20, t_test=10, t_enc=30, dt=0.001, f=DoubleExp(1e-2, 2e-1), penalty=0, reg=1e-1,
        freq=1, tt=5.0, tt_test=5.0, neuron_type=LIF(), load_fd=False, load_w=None, supervised=False):

    d_ens = np.zeros((n_neurons, 2))
    f_ens = f
    w_ff = None
    w_fb = None
    e_ff = None
    e_fb = None
    f_smooth = DoubleExp(1e-2, 2e-1)
    print('Neuron Type: %s' % neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ff = np.load(load_w)['w_ff']
            e_ff = np.load(load_w)['e_ff']
        else:
            print('optimizing encoders from pre to ens')
            data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_enc + tt, f=f, dt=dt, neuron_type=neuron_type,
                w_ff=w_ff, e_ff=e_ff, L_ff=True)
            w_ff = data['w_ff']
            e_ff = data['e_ff']
            np.savez('data/oscillate_w.npz', w_ff=w_ff, e_ff=e_ff)

            fig, ax = plt.subplots()
            sns.distplot(np.ravel(w_ff), ax=ax, kde=False)
            ax.set(xlabel='weights', ylabel='frequency')
            plt.savefig("plots/tuning/oscillate_%s_w_ff.pdf" % (neuron_type))

            a_ens = f_smooth.filt(data['ens'], dt=dt)
            a_supv = f_smooth.filt(data['supv'], dt=dt)
            for n in range(n_neurons):
                fig, ax = plt.subplots(1, 1)
                ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv')
                ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens')
                ax.set(ylim=((0, 40)))
                plt.legend()
                plt.savefig('plots/tuning/oscillate_pre_ens_activity_%s.pdf' % (n))
            plt.close('all')

    if load_fd:
        load = np.load(load_fd)
        d_ens = load['d_ens']
        taus_ens = load['taus_ens']
        f_ens = DoubleExp(taus_ens[0], taus_ens[1])
    else:
        print('gathering filter/decoder training data for ens')
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t + tt, f=f, dt=dt, neuron_type=neuron_type,
            w_ff=w_ff, L_fd=True)
        trans = int(tt / dt)
        d_ens, f_ens, taus_ens = df_opt(data['u'][trans:], data['ens'][trans:], f, dt=dt,
            name='oscillate_%s' % neuron_type, reg=reg, penalty=penalty)
        np.savez('data/oscillate_%s_fd.npz' % neuron_type, d_ens=d_ens, taus_ens=taus_ens)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f.impulse(len(times), dt=0.0001),
            label=r"$f^x, \tau_1=%.3f, \tau_2=%.3f$" % (-1. / f.poles[0], -1. / f.poles[1]))
        ax.plot(times, f_ens.impulse(len(times), dt=0.0001),
            label=r"$f^{ens}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
            % (-1. / f_ens.poles[0], -1. / f_ens.poles[1], np.count_nonzero(d_ens), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/oscillate_%s_filters_ens.pdf" % neuron_type)

        a_ens = f_ens.filt(data['ens'], dt=dt)
        x = f.filt(data['u'], dt=dt)
        xhat_ens = np.dot(a_ens, d_ens)
        rmse_ens = rmse(xhat_ens, x)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' % rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="pre_ens")
        plt.legend(loc='upper right')
        plt.savefig("plots/oscillate_%s_pre_ens_train.pdf" % neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_fb = np.load(load_w)['w_fb']
            e_fb = np.load(load_w)['e_fb']
        else:
            print('optimizing encoders from supv to ens')
            data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_enc + tt, f=f, dt=dt, neuron_type=neuron_type,
                w_ff=w_ff, w_fb=w_fb, e_fb=e_fb, L_fb=True)
            w_fb = data['w_fb']
            e_fb = data['e_fb']
            np.savez('data/oscillate_w.npz', w_ff=w_ff, e_ff=e_ff, w_fb=w_fb, e_fb=e_fb)

            fig, ax = plt.subplots()
            sns.distplot(np.ravel(w_fb), ax=ax, kde=False)
            ax.set(xlabel='weights', ylabel='frequency')
            plt.savefig("plots/tuning/oscillate_%s_w_fb.pdf" % (neuron_type))

            a_ens = f_smooth.filt(data['ens'], dt=dt)
            a_supv = f_smooth.filt(data['supv'], dt=dt)
            # a_supv2 = f_smooth.filt(data['supv2'], dt=dt)
            for n in range(n_neurons):
                fig, ax = plt.subplots(1, 1)
                ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv')
                # ax.plot(data['times'], a_supv2[:,n], alpha=0.5, label='supv2')
                ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens')
                ax.set(ylim=((0, 40)))
                plt.legend()
                plt.savefig('plots/tuning/oscillate_supv_ens_activity_%s.pdf' % (n))
            plt.close('all')

    print("Testing")
    if supervised:
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_test + tt_test, f=f, dt=dt, neuron_type=neuron_type,
            w_ff=w_ff, w_fb=w_fb, supervised=True)
        a_ens = f_ens.filt(data['ens'], dt=dt)
        a_supv = f_ens.filt(data['supv'], dt=dt)
        # a_supv2 = f_ens.filt(data['supv2'], dt=dt)
        xhat_ens_0 = np.dot(a_ens, d_ens)[:, 0]
        xhat_ens_1 = np.dot(a_ens, d_ens)[:, 1]
        xhat_supv_0 = np.dot(a_supv, d_ens)[:, 0]
        xhat_supv_1 = np.dot(a_supv, d_ens)[:, 1]
        # xhat_supv2_0 = np.dot(a_supv2, d_ens)[:,0]
        # xhat_supv2_1 = np.dot(a_supv2, d_ens)[:,1]
        x_0 = f.filt(data['u'], dt=dt)[:, 0]
        x_1 = f.filt(data['u'], dt=dt)[:, 1]
        x2_0 = f.filt(x_0, dt=dt)
        x2_1 = f.filt(x_1, dt=dt)
        times = data['times']

        fig, ax = plt.subplots()
        ax.plot(times, x_0, linestyle="--", label='x_0')
        ax.plot(times, x2_0, linestyle="--", label='x2_0')
        ax.plot(times, xhat_supv_0, label='supv')
        ax.plot(times, xhat_ens_0, label='ens')
        # ax.plot(times, xhat_supv2_0, label='supv2')
        ax.set(xlim=((0, t_test)), ylim=((-1, 1)), xlabel='time (s)', ylabel=r'$\mathbf{x}$')
        plt.legend(loc='upper right')
        plt.savefig("plots/oscillate_%s_supervised_0.pdf" % neuron_type)

        fig, ax = plt.subplots()
        ax.plot(times, x_1, linestyle="--", label='x_1')
        ax.plot(times, x2_1, linestyle="--", label='x2_1')
        ax.plot(times, xhat_supv_1, label='supv')
        ax.plot(times, xhat_ens_1, label='ens')
        # ax.plot(times, xhat_supv2_1, label='supv2')
        ax.set(xlim=((0, t_test)), ylim=((-1, 1)), xlabel='time (s)', ylabel=r'$\mathbf{x}$')
        plt.legend(loc='upper right')
        plt.savefig("plots/oscillate_%s_supervised_1.pdf" % neuron_type)
    else:
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_test + tt_test, f=f, dt=dt, neuron_type=neuron_type,
            w_ff=w_ff, w_fb=w_fb)
        a_ens = f_ens.filt(data['ens'], dt=dt)
        xhat_ens_0 = np.dot(a_ens, d_ens)[:, 0]
        xhat_ens_1 = np.dot(a_ens, d_ens)[:, 1]
        x_0 = f.filt(data['u'], dt=dt)[:, 0]
        x_1 = f.filt(data['u'], dt=dt)[:, 1]
        x2_0 = f.filt(x_0, dt=dt)
        x2_1 = f.filt(x_1, dt=dt)
        times = data['times']

        # fig, ax = plt.subplots()
        # ax.plot(times, x_0, linestyle="--", label='x0')
        # # ax.plot(times, sinusoid_0, label='best fit sinusoid_0')
        # ax.plot(times, xhat_ens_0, label='ens')
        # ax.set(xlim=((0, t_test)), ylim=((-1, 1)), xlabel='time (s)', ylabel=r'$\mathbf{x}$')
        # plt.legend(loc='upper right')
        # plt.savefig("plots/oscillate_%s_test_0.pdf"%neuron_type)
        # fig, ax = plt.subplots()
        # ax.plot(times, x_1, linestyle="--", label='x1')
        # # ax.plot(times, sinusoid_1, label='best fit sinusoid_1')
        # ax.plot(times, xhat_ens_1, label='ens')
        # ax.set(xlim=((0, t_test)), ylim=((-1, 1)), xlabel='time (s)', ylabel=r'$\mathbf{x}$')
        # plt.legend(loc='upper right')
        # plt.savefig("plots/oscillate_%s_test_1.pdf"%neuron_type)

        # curve fit to a sinusoid of arbitrary frequency, phase, magnitude
        print('Curve fitting')
        trans = int(tt_test / dt)
        step = int(0.001 / dt)

        def sinusoid(t, freq, phase, mag, dt=dt):
            return f.filt(mag * np.sin(t * 2 * np.pi * freq + 2 * np.pi * phase), dt=dt)

        p0 = [1, 0, 1]
        param_0, _ = curve_fit(sinusoid, times[trans:], xhat_ens_0[trans:], p0=p0)
        param_1, _ = curve_fit(sinusoid, times[trans:], xhat_ens_1[trans:], p0=p0)
        print('param0', param_0)
        print('param1', param_1)
        sinusoid_0 = sinusoid(times, param_0[0], param_0[1], param_0[2])
        sinusoid_1 = sinusoid(times, param_1[0], param_1[1], param_1[2])

        # error is rmse of xhat and best fit sinusoid, scaled by freq error of best fit sinusoid to x
        freq_error_0 = np.abs(freq - param_0[0])  # param[0] is the fitted frequency (param[1] is the phase)
        freq_error_1 = np.abs(freq - param_1[0])
        rmse_0 = rmse(xhat_ens_0[trans::step], sinusoid_0[trans::step])
        rmse_1 = rmse(xhat_ens_1[trans::step], sinusoid_1[trans::step])
        scaled_rmse_0 = (1 + freq_error_0) * rmse_0
        scaled_rmse_1 = (1 + freq_error_1) * rmse_1

        fig, ax = plt.subplots()
        ax.plot(times, x_0, linestyle="--", label='x0')
        ax.plot(times, sinusoid_0, label='best fit sinusoid_0')
        ax.plot(times, xhat_ens_0, label='ens, scaled rmse=%.3f' % scaled_rmse_0)
        ax.axvline(tt_test, label=r"$t_{transient}$")
        ax.set(ylim=((-1, 1)), xlabel='time (s)', ylabel=r'$\mathbf{x}$')
        plt.legend(loc='upper right')
        plt.savefig("plots/oscillate_%s_test_0.pdf" % neuron_type)

        fig, ax = plt.subplots()
        ax.plot(times, x_1, linestyle="--", label='x1')
        ax.plot(times, sinusoid_1, label='best fit sinusoid_1')
        ax.plot(times, xhat_ens_1, label='ens, scaled rmse=%.3f' % scaled_rmse_1)
        ax.axvline(tt_test, label=r"$t_{transient}$")
        ax.set(ylim=((-1, 1)), xlabel='time (s)', ylabel=r'$\mathbf{x}$')
        plt.legend(loc='upper right')
        plt.savefig("plots/oscillate_%s_test_1.pdf" % neuron_type)

        print('scaled rmses: ', scaled_rmse_0, scaled_rmse_1)
        mean = np.mean([scaled_rmse_0, scaled_rmse_1])
        fig, ax = plt.subplots()
        sns.barplot(data=np.array([mean]))
        ax.set(ylabel='Scaled RMSE', title="mean=%.3f" % mean)
        plt.xticks()
        plt.savefig("plots/oscillate_%s_scaled_rmse.pdf" % neuron_type)
        np.savez('data/oscillate_%s_results.npz' % neuron_type,
            scaled_rmse_0=scaled_rmse_0, scaled_rmse_1=scaled_rmse_1)
        return mean
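
# A minimal standalone sketch (an assumption, not taken from the original script) of the
# test metric computed above: the RMSE between the decoded estimate and its best-fit
# sinusoid, scaled by (1 + |frequency error|) so that solutions drifting away from the
# target frequency are penalized. `xhat`, `fit`, `target_freq`, and `fit_freq` are
# hypothetical names used only for illustration.
def scaled_rmse(xhat, fit, target_freq, fit_freq):
    error = np.sqrt(np.mean(np.square(xhat - fit)))  # plain RMSE against the fitted sinusoid
    return (1 + np.abs(target_freq - fit_freq)) * error
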
def run(n_neurons=100, t=10, t_flat=3, t_test=10, n_train=20, n_test=10, reg=1e-1, penalty=0.0, df_evals=100,
        T=0.2, dt=0.001, neuron_type=LIF(), f=DoubleExp(1e-2, 2e-1), f_smooth=DoubleExp(1e-2, 2e-1),
        w_file="data/memory_w.npz", fd_file="data/memory_DurstewitzNeuron()_fd.npz",
        load_w_x=False, load_w_u=False, load_w_fb=False, load_fd=False, supervised=False):

    d_ens = np.zeros((n_neurons, 1))
    f_ens = f
    w_u = None
    w_x = None
    w_fb = None
    DA = None

    if isinstance(neuron_type, DurstewitzNeuron):
        DA = neuron_type.DA
        if load_w_x:
            w_x = np.load(w_file)['w_x']
            e_x = np.load(w_file)['e_x']
        else:
            print('optimizing encoders from pre_x into ens (white noise)')
            e_x = None
            w_x = None
            for nenc in range(n_train):
                print("encoding trial %s" % nenc)
                u = make_signal(t=t, t_flat=t_flat, f=f, normed='x', seed=nenc)
                t_sim = len(u) * dt - dt
                stim_func = lambda t: u[int(t / dt)]
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_sim, f=f, dt=dt, neuron_type=neuron_type,
                    stim_func=stim_func, T=T, e_x=e_x, w_x=w_x, L_x=True)
                w_x = data['w_x']
                e_x = data['e_x']
                np.savez('data/memory_DA%s_w.npz' % DA, w_x=w_x, e_x=e_x)

                a_ens = f_smooth.filt(data['ens'], dt=dt)
                a_supv = f_smooth.filt(data['supv'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv')
                    ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens')
                    ax.set(ylabel='firing rate', ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/memory_x_DA%s_nenc_%s_activity_%s.pdf' % (DA, nenc, n))
                plt.close('all')

        if load_w_u:
            w_u = np.load(w_file)['w_u']
            e_u = np.load(w_file)['e_u']
        else:
            print('optimizing encoders from pre_u into ens (white noise)')
            e_u = None
            w_u = None
            bases = np.linspace(-0.5, 0.5, n_train)
            for nenc in range(n_train):
                print("encoding trial %s" % nenc)
                u = make_signal(t=t, t_flat=t_flat, f=f, normed='x', seed=nenc)
                t_sim = len(u) * dt - dt
                stim_func = lambda t: u[int(t / dt)]
                stim_func_base = lambda t: bases[nenc]
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_sim, f=f, dt=dt, neuron_type=neuron_type,
                    stim_func=stim_func, T=T, w_x=w_x, e_u=e_u, w_u=w_u, stim_func_base=stim_func_base, L_u=True)
                w_u = data['w_u']
                e_u = data['e_u']
                np.savez('data/memory_DA%s_w.npz' % DA, w_x=w_x, e_x=e_x, w_u=w_u, e_u=e_u)

                a_ens = f_smooth.filt(data['ens'], dt=dt)
                a_supv = f_smooth.filt(data['supv'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv')
                    ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens')
                    ax.set(ylabel='firing rate', ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/memory_u_DA%s_nenc_%s_activity_%s.pdf' % (DA, nenc, n))
                plt.close('all')

    if load_fd:
        load = np.load(fd_file)
        d_ens = load['d_ens']
        taus_ens = load['taus_ens']
        f_ens = DoubleExp(taus_ens[0], taus_ens[1])
    else:
        print('gathering filter/decoder training data for ens (white-flat-white)')
        targets = np.zeros((1, 1))
        spikes = np.zeros((1, n_neurons))
        for ntrn in range(n_train):
            print('filter/decoder iteration %s' % ntrn)
            u = make_signal(t=t, t_flat=t_flat, f=f, normed='x', seed=ntrn, dt=dt)
            t_sim = len(u) * dt - dt
            stim_func = lambda t: u[int(t / dt)]
            data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_sim, f=f, dt=dt, neuron_type=neuron_type,
                stim_func=stim_func, T=T, w_x=w_x, w_u=w_u, L_fd=True)
            targets = np.append(targets, data['x'], axis=0)
            spikes = np.append(spikes, data['ens'], axis=0)

        print('optimizing filters and decoders')
        d_ens, f_ens, taus_ens = df_opt(targets, spikes, f, dt=dt, penalty=penalty, reg=reg,
            df_evals=df_evals, name='flat_%s' % neuron_type)
        if DA:
            np.savez('data/memory_%s_DA%s_fd.npz' % (neuron_type, DA), d_ens=d_ens, taus_ens=taus_ens)
        else:
            np.savez('data/memory_%s_fd.npz' % neuron_type, d_ens=d_ens, taus_ens=taus_ens)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f.impulse(len(times), dt=0.0001),
            label=r"$f^x, \tau_1=%.3f, \tau_2=%.3f$" % (-1. / f.poles[0], -1. / f.poles[1]))
        ax.plot(times, f_ens.impulse(len(times), dt=0.0001),
            label=r"$f^{ens}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
            % (-1. / f_ens.poles[0], -1. / f_ens.poles[1], np.count_nonzero(d_ens), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        if DA:
            plt.savefig("plots/memory_%s_DA%s_filters.pdf" % (neuron_type, DA))
        else:
            plt.savefig("plots/memory_%s_filters.pdf" % neuron_type)

        a_ens = f_ens.filt(spikes, dt=dt)
        xhat_ens = np.dot(a_ens, d_ens)
        x = f.filt(targets, dt=dt)
        rmse_ens = rmse(xhat_ens, x)
        fig, ax = plt.subplots()
        ax.plot(x, linestyle="--", label='x')
        ax.plot(xhat_ens, label='ens, rmse=%.3f' % rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train_fb")
        plt.legend(loc='upper right')
        if DA:
            plt.savefig("plots/memory_%s_DA%s_train_fb.pdf" % (neuron_type, DA))
        else:
            plt.savefig("plots/memory_%s_train_fb.pdf" % neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w_fb:
            w_fb = np.load(w_file)['w_fb']
            e_fb = np.load(w_file)['e_fb']
        else:
            print('optimizing encoders from ens2 into ens (white noise)')
            e_fb = None
            w_fb = None
            for nenc in range(n_train):
                print("encoding trial %s" % nenc)
                u = make_signal(t=t, t_flat=t_flat, f=f, normed='x', seed=nenc)
                t_sim = len(u) * dt - dt
                stim_func = lambda t: u[int(t / dt)]
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_sim, f=f, dt=dt, neuron_type=neuron_type,
                    stim_func=stim_func, T=T, w_x=w_x, w_u=w_u, e_fb=e_fb, w_fb=w_fb, L_fb=True)
                w_fb = data['w_fb']
                e_fb = data['e_fb']
                np.savez('data/memory_DA%s_w.npz' % DA, w_x=w_x, e_x=e_x, w_u=w_u, e_u=e_u, w_fb=w_fb, e_fb=e_fb)

                a_ens = f_smooth.filt(data['ens'], dt=dt)
                a_ens2 = f_smooth.filt(data['ens2'], dt=dt)
                a_supv = f_smooth.filt(data['supv'], dt=dt)
                # a_supv2 = f_smooth.filt(data['supv2'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv')
                    # ax.plot(data['times'], a_supv2[:,n], alpha=0.5, label='supv2')
                    ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens')
                    ax.plot(data['times'], a_ens2[:, n], alpha=0.5, label='ens2')
                    ax.set(ylabel='firing rate', ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/memory_fb_DA%s_nenc_%s_activity_%s.pdf' % (DA, nenc, n))
                plt.close('all')

    errors_flat = np.zeros((n_test))
    errors_final = np.zeros((n_test))
    errors_abs = np.zeros((n_test, int(t_test / dt)))
    for test in range(n_test):
        print('test %s' % test)
        u = make_signal(t=6, t_flat=t_test, f=f, normed='x', seed=test, dt=dt, test=True)
        t_sim = len(u) * dt - dt
        stim_func = lambda t: u[int(t / dt)]
        if supervised:
            data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_sim, f=f, dt=dt, neuron_type=neuron_type,
                stim_func=stim_func, T=T, w_x=w_x, w_u=w_u, w_fb=w_fb, supervised=True)
            a_ens = f_ens.filt(data['ens'], dt=dt)
            a_ens2 = f_ens.filt(data['ens2'], dt=dt)
            x = f.filt(f.filt(data['x'], dt=dt), dt=dt)
            xhat_ens = np.dot(a_ens, d_ens)
            xhat_ens2 = np.dot(a_ens2, d_ens)
            xhat_supv = data['supv_state']
            xhat_supv2 = data['supv2_state']
            error_ens = rmse(xhat_ens, x)
            error_ens2 = rmse(xhat_ens2, x)
            error_supv = rmse(xhat_supv, x)
            error_supv2 = rmse(xhat_supv2, x)

            fig, ax = plt.subplots()
            ax.plot(data['times'], x, linestyle="--", label='x')
            ax.plot(data['times'], xhat_ens, label='ens, error=%.3f' % error_ens)
            ax.plot(data['times'], xhat_ens2, label='ens2, error=%.3f' % error_ens2)
            ax.plot(data['times'], xhat_supv, label='supv, error=%.3f' % error_supv)
            ax.plot(data['times'], xhat_supv2, label='supv2, error=%.3f' % error_supv2)
            ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="supervised test")
            plt.legend(loc='upper right')
            if DA:
                plt.savefig("plots/memory_%s_DA%s_supervised_test_%s.pdf" % (neuron_type, DA, test))
            else:
                plt.savefig("plots/memory_%s_supervised_test_%s.pdf" % (neuron_type, test))
            plt.close('all')
        else:
            data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_sim, f=f, dt=dt, neuron_type=neuron_type,
                stim_func=stim_func, T=T, w_u=w_u, w_x=None, w_fb=w_fb)
            a_ens = f_ens.filt(data['ens'], dt=dt)
            u = f.filt(f.filt(T * data['u'], dt=dt), dt=dt)
            x = f.filt(data['x'], dt=dt)
            xhat_ens = np.dot(a_ens, d_ens)
            error_flat = rmse(xhat_ens[-int(t_test / dt):], x[-int(t_test / dt):])
            error_final = rmse(xhat_ens[-1], x[-1])
            error_abs = np.abs(xhat_ens[-int(t_test / dt):, 0] - x[-int(t_test / dt):, 0])
            errors_flat[test] = error_flat
            errors_final[test] = error_final
            errors_abs[test] = error_abs
            if test > 10:
                continue

            fig, ax = plt.subplots()
            ax.plot(data['times'], u, linestyle="--", label='u')
            ax.plot(data['times'], x, linestyle="--", label='x')
            ax.plot(data['times'], xhat_ens, label='error_flat=%.3f, error_final=%.3f' % (error_flat, error_final))
            ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test")
            plt.legend(loc='upper right')
            if DA:
                plt.savefig("plots/memory_%s_DA%s_test_%s.pdf" % (neuron_type, DA, test))
            else:
                plt.savefig("plots/memory_%s_test_%s.pdf" % (neuron_type, test))
            plt.close('all')

            fig, ax = plt.subplots()
            ax.plot(data['times'][-int(t_test / dt):], error_abs, linestyle="--", label='error')
            ax.set(xlabel='time (s)', ylabel=r'$|\mathbf{x} - \mathbf{\hat{x}}|$', title="test")
            # plt.legend(loc='upper right')
            if DA:
                plt.savefig("plots/memory_abs_%s_DA%s_test_%s.pdf" % (neuron_type, DA, test))
            else:
                plt.savefig("plots/memory_abs_%s_test_%s.pdf" % (neuron_type, test))
            plt.close('all')

    mean_flat = np.mean(errors_flat)
    mean_final = np.mean(errors_final)
    CI_flat = sns.utils.ci(errors_flat)
    CI_final = sns.utils.ci(errors_final)

    if DA:
        np.savez("data/%s_DA%s_errors_abs.npz" % (neuron_type, DA), errors_abs=errors_abs)
    else:
        np.savez("data/%s_errors_abs.npz" % neuron_type, errors_abs=errors_abs)

    # errors = np.vstack((errors_flat, errors_final))
    # names = ['flat', 'final']
    # fig, ax = plt.subplots()
    # sns.barplot(data=errors.T)
    # ax.set(ylabel='RMSE')
    # plt.xticks(np.arange(len(names)), tuple(names), rotation=0)
    # plt.savefig("plots/memory_%s_errors.pdf"%neuron_type)

    dfs = []
    columns = ("nAvg", "time", "error")
    for a in range(errors_abs.shape[0]):
        for t in range(errors_abs.shape[1]):
            dfs.append(pd.DataFrame([[a, dt * t, errors_abs[a, t]]], columns=columns))
    df = pd.concat(dfs, ignore_index=True)

    fig, ax = plt.subplots()
    sns.lineplot(data=df, x="time", y="error", ax=ax)
    ax.set(xlabel='time (s)', ylabel=r'$|\mathbf{x} - \mathbf{\hat{x}}|$')
    fig.tight_layout()
    if DA:
        fig.savefig("plots/memory_abs_%s_DA%s_all.pdf" % (neuron_type, DA))
    else:
        fig.savefig("plots/memory_abs_%s_all.pdf" % (neuron_type))
    plt.close('all')

    print("errors: mean flat=%.3f, mean final=%.3f" % (np.mean(errors_flat), np.mean(errors_final)))
    return errors_flat, errors_final