def compute_white_noise(base_freq):
    """
    Computes white-noise behavior kernels averaged across all networks
    trained at the given stimulus base frequency
    :param base_freq: The base frequency of the trained networks (0.5, 1.0 or 2.0 Hz)
    :return:
        [0]: The time axis of the kernels
        [1]: For each network the kernel averaged over straight, left and right behaviors
    """
    if base_freq == 0.5:
        paths = paths_05Hz
        base = base_path_05Hz
        std = std_05Hz
        n_steps = 100000000
    elif base_freq == 1.0:
        paths = paths_512_zf
        base = base_path_zf
        std = std_zf
        n_steps = 10000000  # there are 10 times as many models
    elif base_freq == 2.0:
        paths = paths_2Hz
        base = base_path_2Hz
        std = std_2Hz
        n_steps = 50000000
    else:
        raise ValueError("Indicated base frequency has not been trained")
    behav_kernels = {}
    k_names = ["stay", "straight", "left", "right"]
    for p in paths:
        m_path = mpath(base, p)
        mdata_wn = c.ModelData(m_path)
        gpn_wn = mo.network_model()
        gpn_wn.load(mdata_wn.ModelDefinition, mdata_wn.LastCheckpoint)
        wna = mo.wn_sim(std, gpn_wn, stim_std=2)
        # scale the move probability and base frequency of the simulation to the trained frequency
        wna.p_move *= base_freq
        wna.bf_mult = base_freq
        wna.switch_mean = 5
        wna.switch_std = 1
        # use the evolved weights of the final generation
        ev_path = m_path + '/evolve/generation_weights.npy'
        weights = np.load(ev_path)
        w = np.mean(weights[-1, :, :], 0)
        wna.bf_weights = w
        kernels = wna.compute_behavior_kernels(n_steps)
        for i, n in enumerate(k_names):
            if n in behav_kernels:
                behav_kernels[n].append(kernels[i])
            else:
                behav_kernels[n] = [kernels[i]]
    time = np.linspace(-4, 1, behav_kernels['straight'][0].size)
    for n in k_names:
        behav_kernels[n] = np.vstack(behav_kernels[n])
    # average the kernels of all movement-generating behaviors
    plot_kernel = (behav_kernels["straight"] + behav_kernels["left"] + behav_kernels["right"]) / 3
    return time, plot_kernel
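# Usage sketch (illustrative, not part of the original script): compares the
# white-noise kernels averaged across networks for the three trained base
# frequencies. Assumes the module-level globals referenced inside
# compute_white_noise (paths_05Hz, std_05Hz, etc.) are defined as above and
# that np and pl are imported as elsewhere in these scripts.
def plot_white_noise_kernel_comparison():
    fig, ax = pl.subplots()
    for bf in (0.5, 1.0, 2.0):
        kernel_time, plot_kernel = compute_white_noise(bf)
        # plot_kernel is (n_networks x kernel_size); average across networks
        ax.plot(kernel_time, np.mean(plot_kernel, 0), label="{0} Hz".format(bf))
    ax.set_xlabel("Time around behavior [s]")
    ax.set_ylabel("Average filter kernel")
    ax.legend()
    return fig, ax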
clust_ids_zf = np.array(clfile["clust_ids"])
clfile.close()
# load and interpolate temperature stimulus
dfile = h5py.File("stimFile.hdf5", 'r')
tsin = np.array(dfile['sine_L_H_temp'])
x = np.arange(tsin.size)  # stored at 20 Hz !
xinterp = np.linspace(0, tsin.size, tsin.size * GlobalDefs.frame_rate // 20)
temperature = np.interp(xinterp, x, tsin)
dfile.close()
# get activity data
all_ids_zf = []
all_cells_zf = []
for i, p in enumerate(paths_512_zf):
    cell_res, ids = ana_zf.temperature_activity(mpath(base_path_zf, p), temperature, i)
    all_ids_zf.append(ids)
    all_cells_zf.append(cell_res)
all_ids_zf = np.hstack(all_ids_zf)
all_cells_zf = np.hstack(all_cells_zf)
# convolve activity with nuclear gcamp calcium kernel
tau_on = 1.4  # seconds
tau_on *= GlobalDefs.frame_rate  # in frames
tau_off = 2  # seconds
tau_off *= GlobalDefs.frame_rate  # in frames
kframes = np.arange(10 * GlobalDefs.frame_rate)  # 10 s long kernel
kernel = 2 ** (-kframes / tau_off) * (1 - 2 ** (-kframes / tau_on))
kernel = kernel / kernel.sum()
# convolve each cell's activity trace with our kernel
for i in range(all_cells_zf.shape[1]):
    # NOTE: the loop body was truncated in the original; this column-wise
    # convolution, trimmed back to the trace length, is an assumed completion
    all_cells_zf[:, i] = np.convolve(all_cells_zf[:, i], kernel, mode='full')[:all_cells_zf.shape[0]]
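# Illustrative sketch (not part of the original script): builds the same
# double-exponential nuclear GCaMP kernel for an assumed frame rate of 100 Hz
# and shows how it smears out a brief 1 s activity pulse.
def demo_calcium_kernel(frame_rate=100):
    t_on = 1.4 * frame_rate  # rise time constant in frames
    t_off = 2 * frame_rate  # decay time constant in frames
    kf = np.arange(10 * frame_rate)  # 10 s long kernel
    k = 2 ** (-kf / t_off) * (1 - 2 ** (-kf / t_on))
    k /= k.sum()  # unit area preserves overall response magnitude
    pulse = np.zeros(20 * frame_rate)
    pulse[5 * frame_rate:6 * frame_rate] = 1  # 1 s activity pulse at t = 5 s
    smeared = np.convolve(pulse, k, mode='full')[:pulse.size]
    fig, ax = pl.subplots()
    ax.plot(np.arange(pulse.size) / frame_rate, pulse, label="raw activity")
    ax.plot(np.arange(smeared.size) / frame_rate, smeared, label="convolved")
    ax.set_xlabel("Time [s]")
    ax.legend()
    return fig, ax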
# the names of these regressors according to Haesemeyer et al., 2018
reg_names = ["Fast ON", "Slow ON", "Fast OFF", "Slow OFF"]
# load and interpolate temperature stimulus
dfile = h5py.File("stimFile.hdf5", 'r')
tsin = np.array(dfile['sine_L_H_temp'])
x = np.arange(tsin.size)  # stored at 20 Hz !
xinterp = np.linspace(0, tsin.size, tsin.size * GlobalDefs.frame_rate // 20)
temperature = np.interp(xinterp, x, tsin)
dfile.close()
# get activity data
all_cells = []
all_ids = []
for i, p in enumerate(paths_512):
    cell_res, ids = ana.temperature_activity(mpath(base_path, p), temperature, i)
    all_cells.append(cell_res)
    all_ids.append(ids)
all_cells = np.hstack(all_cells)
all_ids = np.hstack(all_ids)
# convolve activity with nuclear gcamp calcium kernel
tau_on = 1.4  # seconds
tau_on *= GlobalDefs.frame_rate  # in frames
tau_off = 2  # seconds
tau_off *= GlobalDefs.frame_rate  # in frames
kframes = np.arange(10 * GlobalDefs.frame_rate)  # 10 s long kernel
kernel = 2 ** (-kframes / tau_off) * (1 - 2 ** (-kframes / tau_on))
kernel = kernel / kernel.sum()
# convolve each cell's activity trace with our kernel
for i in range(all_cells.shape[1]):
    # NOTE: the loop body was truncated in the original; this column-wise
    # convolution, trimmed back to the trace length, is an assumed completion
    all_cells[:, i] = np.convolve(all_cells[:, i], kernel, mode='full')[:all_cells.shape[0]]
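# Illustrative sketch (not part of the original script): the same np.interp
# upsampling pattern used above, shown on a toy 5 s stimulus. store_rate and
# frame_rate are assumed values for illustration only.
def demo_stimulus_upsampling(store_rate=20, frame_rate=100):
    toy = np.sin(np.linspace(0, 2 * np.pi, 5 * store_rate))  # 5 s toy stimulus at 20 Hz
    xs = np.arange(toy.size)  # sample indices at the storage rate
    # frame_rate / store_rate times as many target sample positions
    xi = np.linspace(0, toy.size, toy.size * frame_rate // store_rate)
    up = np.interp(xi, xs, toy)
    print(toy.size, "->", up.size)  # 100 -> 500
    return up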
clfile.close()
# load and interpolate temperature stimulus
dfile = h5py.File("stimFile.hdf5", 'r')
tsin = np.array(dfile['sine_L_H_temp'])
x = np.arange(tsin.size)  # stored at 20 Hz !
xinterp = np.linspace(0, tsin.size, tsin.size * GlobalDefs.frame_rate // 20)
temperature = np.interp(xinterp, x, tsin)
dfile.close()
# get activity data
all_ids_zf = []
all_cells_zf = []
for i, p in enumerate(paths_512_zf):
    cell_res, ids = ana_zf.temperature_activity(mpath(base_path_zf, p), temperature, i)
    all_ids_zf.append(ids)
    all_cells_zf.append(cell_res)
all_ids_zf = np.hstack(all_ids_zf)
all_cells_zf = np.hstack(all_cells_zf)
all_ids_ce = []
all_cells_ce = []
for i, p in enumerate(paths_512_ce):
    cell_res, ids = ana_ce.temperature_activity(mpath(base_path_ce, p), temperature, i)
    all_ids_ce.append(ids)
    all_cells_ce.append(cell_res)
all_ids_ce = np.hstack(all_ids_ce)
all_cells_ce = np.hstack(all_cells_ce)
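# Illustrative sketch (not part of the original script): a first-pass comparison
# of average unit responses across the two species. Assumes all_cells_zf and
# all_cells_ce are (timepoints x units) matrices covering the same stimulus
# presentation, as suggested by their construction above.
def plot_species_average_activity():
    fig, ax = pl.subplots()
    ftime = np.arange(all_cells_zf.shape[0]) / GlobalDefs.frame_rate
    ax.plot(ftime, np.mean(all_cells_zf, 1), label="zebrafish networks")
    ax.plot(ftime, np.mean(all_cells_ce, 1), label="C. elegans networks")
    ax.set_xlabel("Time [s]")
    ax.set_ylabel("Average unit activation")
    ax.legend()
    return fig, ax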
fig.savefig(save_folder + "test_errors_th.pdf", format="pdf")

std_zf = c.GradientData.load_standards("gd_training_data.hdf5")
ana_th = a.Analyzer(MoTypes(False), std_zf, "sim_store_tanh.hdf5", "activity_store_tanh.hdf5")
ana_zf = a.Analyzer(MoTypes(False), std_zf, "sim_store.hdf5", "activity_store.hdf5")
# second panel: Gradient distribution
bns = np.linspace(0, GlobalDefs.circle_sim_params["radius"], 100)
centers = a.temp_convert(bns[:-1] + np.diff(bns) / 2, "r")  # bin centers, converted from radius to temperature
naive = np.empty((len(paths_512_th), centers.size))
trained_th = np.empty_like(naive)
trained_zf = np.empty((len(paths_512_zf), centers.size))
for i, p in enumerate(paths_512_th):
    pos_n = ana_th.run_simulation(mpath(base_path_th, p), "r", "naive")
    naive[i, :] = a.bin_simulation(pos_n, bns, "r")
    pos_t = ana_th.run_simulation(mpath(base_path_th, p), "r", "trained")
    trained_th[i, :] = a.bin_simulation(pos_t, bns, "r")
for i, p in enumerate(paths_512_zf):
    pos_t = ana_zf.run_simulation(mpath(base_path_zf, p), "r", "trained")
    trained_zf[i, :] = a.bin_simulation(pos_t, bns, "r")
fig, ax = pl.subplots()
sns.tsplot(naive, centers, n_boot=1000, condition="Naive", color='k')
sns.tsplot(trained_th, centers, n_boot=1000, condition="Trained", color="C1")
ax.plot(centers, np.mean(trained_zf, 0), 'k', lw=0.25)
# NOTE: this call was truncated in the original; the dashed style marking the
# preferred temperature is an assumed completion
ax.plot([GlobalDefs.tPreferred, GlobalDefs.tPreferred], [0, 0.03], 'k--')
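# Illustrative stand-in (not part of the original script): a.bin_simulation is
# assumed to histogram positions along the requested dimension into a normalized
# occupancy; this toy version shows that idea, NOT the actual implementation.
def bin_radial_positions(radial_pos, bins):
    h = np.histogram(radial_pos, bins)[0].astype(np.float64)
    return h / h.sum()  # normalize so bin occupancies sum to 1

# e.g. uniform random radial positions yield a roughly flat occupancy:
# bin_radial_positions(np.random.uniform(0, 100, 10000), np.linspace(0, 100, 100))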