def do_simulation(path):
    """
    Uses a model identified by path to run a naive and a trained simulation
    :param path: The model path
    :return:
        [0]: The facing angle bin centers
        [1]: The occupancy of the naive model
        [2]: The occupancy of the trained model
    """
    global std_pt
    bins = np.linspace(-np.pi, np.pi, 100)  # bin edges in radians
    bcenters = bins[:-1] + np.diff(bins) / 2  # bin centers
    # naive simulation
    mdata = c.ModelData(path)
    model_naive = c.ZfGpNetworkModel()
    model_naive.load(mdata.ModelDefinition, mdata.FirstCheckpoint)
    model_trained = c.ZfGpNetworkModel()
    model_trained.load(mdata.ModelDefinition, mdata.LastCheckpoint)
    sim = MoTypes(False).pt_sim(model_naive, std_pt, 100)
    pos_naive = sim.run_simulation(GlobalDefs.n_steps)
    h_naive = a.bin_simulation_pt(pos_naive, bins)
    sim = MoTypes(False).pt_sim(model_trained, std_pt, 100)
    pos_trained = sim.run_simulation(GlobalDefs.n_steps)
    h_trained = a.bin_simulation_pt(pos_trained, bins)
    return bcenters, h_naive, h_trained
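
# Hedged usage sketch (not part of the original analysis): illustrates how the values
# returned by do_simulation above could be plotted. The helper name and the model path
# argument are hypothetical; assumes matplotlib (pl) and seaborn (sns) are imported as
# in the plotting panels below.
def _example_plot_occupancy(model_path):
    bc, h_naive, h_trained = do_simulation(model_path)
    fig, ax = pl.subplots()
    ax.plot(np.rad2deg(bc), h_naive, label="naive")
    ax.plot(np.rad2deg(bc), h_trained, label="trained")
    ax.set_xlabel("Facing angle [degrees]")
    ax.set_ylabel("Occupancy")
    ax.legend()
    sns.despine(fig, ax)
    return fig
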
def get_cluster_assignments(mt: MoTypes, model_dir: str, regressors, t_stimulus, std, droplist):
    """
    Creates a dictionary of cluster assignments for cells in t and m branch of a model
    :param mt: The model organism to use
    :param model_dir: The folder of the model checkpoint
    :param regressors: The cluster regressors
    :param t_stimulus: The temperature stimulus to use
    :param std: The standardizations
    :param droplist: Unit drop list
    :return: Dictionary with cluster assignments of units in the 't' and 'm' branches
    """
    md = c.ModelData(model_dir)
    ml = mt.network_model()
    ml.load(md.ModelDefinition, md.LastCheckpoint)
    # prepend lead-in to stimulus
    lead_in = np.full(ml.input_dims[2] - 1, np.mean(t_stimulus[:10]))
    temp = np.r_[lead_in, t_stimulus]
    act_dict = ml.unit_stimulus_responses(temp, None, None, std, droplist)
    mpool = get_pool()
    ares = {k: [mpool.apply_async(get_best_fit, (ad, regressors)) for ad in act_dict[k]] for k in ['t', 'm']}
    retval = {k: np.vstack([ar.get() for ar in ares[k]]) for k in ares}
    return retval
def playback_response_helper(mo_type, model_path, stimulus, std, no_pad_size, nreps):
    mdata = c.ModelData(model_path)
    gpn_wn = mo_type.network_model()
    gpn_wn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
    wna = mo_type.wn_sim(std, gpn_wn, t_preferred=GlobalDefs.tPreferred)
    ev_path = model_path + '/evolve/generation_weights.npy'
    ev_weights = np.load(ev_path)
    w = np.mean(ev_weights[-1, :, :], 0)
    wna.bf_weights = w
    wna.eval_every = 2  # fast evaluation to keep up with stimulus fluctuations
    traces = []
    for r in range(nreps):
        bt = wna.compute_openloop_behavior(stimulus)[0].astype(float)
        traces.append(bt[-no_pad_size:])
    traces = np.vstack(traces)
    p_move = np.mean(traces > 0, 0)  # probability of selecting a movement bout (p_bout weights plus pred. control)
    p_bout = np.mean(traces > -1, 0)  # any behavior selected - purely under control of p_bout weights
    # compute magnitude by using expected values for straight and turn bouts
    traces[traces < 1] = np.nan
    traces[traces < 2] = 0
    traces[traces > 1] = 30
    mag = np.nanmean(traces, 0)
    return p_move, p_bout, mag
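
# Hedged usage sketch (assumptions marked): average playback responses across a set of
# models. The loop over paths_512_zf / base_path_zf / std_zf mirrors names used elsewhere
# in this script; the stimulus argument "playback_stim" and the pad size are hypothetical
# placeholders, not part of the original analysis.
def _example_playback_average(playback_stim, no_pad_size, nreps=10):
    all_p_move = []
    for p in paths_512_zf:
        pm, pb, mag = playback_response_helper(MoTypes(False), mpath(base_path_zf, p),
                                               playback_stim, std_zf, no_pad_size, nreps)
        all_p_move.append(pm)
    # return the across-model average movement probability trace
    return np.vstack(all_p_move).mean(0)
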
def run_flat_gradient(model_path):
    mdata = c.ModelData(model_path)
    gpn = c.SimpleRLNetwork()
    gpn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
    # equal lower and upper temperatures (22/22) make the simulated gradient flat
    arena = CircleRLTrainer(gpn, sim_radius, 22, 22, 26)
    arena.t_std = t_std
    arena.t_mean = t_mean
    arena.p_explore = 0.25
    return arena.run_sim(GlobalDefs.n_steps, False)[0]
def run_flat_gradient(model_path, drop_list=None):
    mdata = c.ModelData(model_path)
    gpn = MoTypes(False).network_model()
    gpn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
    flt_params = GlobalDefs.circle_sim_params.copy()
    flt_params["t_max"] = flt_params["t_min"]
    sim = MoTypes(False).rad_sim(gpn, std, **flt_params)
    sim.t_max = sim.t_min  # reset gradient to be flat
    sim.remove = drop_list
    evo_path = model_path + '/evolve/generation_weights.npy'
    evo_weights = np.load(evo_path)
    w = np.mean(evo_weights[-1, :, :], 0)
    sim.bf_weights = w
    return sim.run_simulation(GlobalDefs.n_steps, False)
def unit_wn_helper(mo_type, model_path, std, nsamples):
    md_wn = c.ModelData(model_path)
    gpn_wnsim = mo_type.network_model()
    gpn_wnsim.load(md_wn.ModelDefinition, md_wn.LastCheckpoint)
    wnsim = mo_type.wn_sim(std, gpn_wnsim, stim_std=2)
    wnsim.switch_mean = 5
    wnsim.switch_std = 1
    ev_path = model_path + '/evolve/generation_weights.npy'
    ev_weights = np.load(ev_path)
    wts = np.mean(ev_weights[-1, :, :], 0)
    wnsim.bf_weights = wts
    all_triggered_units = wnsim.compute_behav_trig_activity(nsamples)
    units_straight = all_triggered_units[1]['t']  # only use units in temperature branch
    left = all_triggered_units[2]['t']
    right = all_triggered_units[3]['t']
    units_turn = [l + r for (l, r) in zip(left, right)]
    return units_straight, units_turn
def compute_white_noise(base_freq):
    if base_freq == 0.5:
        paths = paths_05Hz
        base = base_path_05Hz
        std = std_05Hz
        n_steps = 100000000
    elif base_freq == 1.0:
        paths = paths_512_zf
        base = base_path_zf
        std = std_zf
        n_steps = 10000000  # there are 10 times as many models
    elif base_freq == 2.0:
        paths = paths_2Hz
        base = base_path_2Hz
        std = std_2Hz
        n_steps = 50000000
    else:
        raise ValueError("Indicated base frequency has not been trained")
    behav_kernels = {}
    k_names = ["stay", "straight", "left", "right"]
    for p in paths:
        m_path = mpath(base, p)
        mdata_wn = c.ModelData(m_path)
        gpn_wn = mo.network_model()
        gpn_wn.load(mdata_wn.ModelDefinition, mdata_wn.LastCheckpoint)
        wna = mo.wn_sim(std, gpn_wn, stim_std=2)
        wna.p_move *= base_freq
        wna.bf_mult = base_freq
        wna.switch_mean = 5
        wna.switch_std = 1
        ev_path = m_path + '/evolve/generation_weights.npy'
        weights = np.load(ev_path)
        w = np.mean(weights[-1, :, :], 0)
        wna.bf_weights = w
        kernels = wna.compute_behavior_kernels(n_steps)
        for i, n in enumerate(k_names):
            if n in behav_kernels:
                behav_kernels[n].append(kernels[i])
            else:
                behav_kernels[n] = [kernels[i]]
    time = np.linspace(-4, 1, behav_kernels['straight'][0].size)
    for n in k_names:
        behav_kernels[n] = np.vstack(behav_kernels[n])
    plot_kernel = (behav_kernels["straight"] + behav_kernels["left"] + behav_kernels["right"]) / 3
    return time, plot_kernel
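
# Hedged usage sketch (not in the original script): compare the averaged movement kernels
# returned by compute_white_noise across the trained stimulus frequencies. Assumes the
# 0.5/1.0/2.0 Hz model paths and standardizations referenced inside compute_white_noise
# are defined; sns.tsplot follows the plotting style used in the panels below.
def _example_plot_wn_kernels():
    fig, ax = pl.subplots()
    for bf, col in zip([0.5, 1.0, 2.0], ["C0", "C1", "C2"]):
        k_time, k = compute_white_noise(bf)
        sns.tsplot(k, k_time, n_boot=1000, color=col, ax=ax)
    ax.set_xlabel("Time around bout [s]")
    ax.set_ylabel("Filter kernel")
    sns.despine(fig, ax)
    return fig
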
def get_cell_responses(path, temp):
    """
    Loads a model and computes the temperature response of all neurons returning response matrix
    :param path: Model path
    :param temp: Temperature stimulus
    :return: n-timepoints x m-neurons matrix of responses
    """
    global std_pt
    mdata = c.ModelData(path)
    # create our model and load from last checkpoint
    gpn = c.ZfGpNetworkModel()
    gpn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
    # prepend lead-in to stimulus
    lead_in = np.full(gpn.input_dims[2] - 1, np.mean(temp[:10]))
    temp = np.r_[lead_in, temp]
    activities = gpn.unit_stimulus_responses(temp, None, None, std_pt)
    return np.hstack(activities['t']) if 't' in activities else np.hstack(activities['m'])
def get_cell_responses_rl(path, temp, temp_mean, temp_std, trained=True):
    """
    Loads a model and computes the temperature response of all neurons returning response matrix
    :param path: Model path
    :param temp: Temperature stimulus
    :param temp_mean: Average training temperature
    :param temp_std: Training temperature standard deviation
    :param trained: If false load naive network otherwise trained
    :return: n-timepoints x m-neurons matrix of responses
    """
    mdata = core.ModelData(path)
    rl = core.ReinforcementLearningNetwork()
    rl.load(mdata.ModelDefinition, mdata.LastCheckpoint if trained else mdata.FirstCheckpoint)
    # prepend lead-in to stimulus
    lead_in = np.full(rl.input_dims[2] - 1, np.asscalar(np.mean(temp[:10])))
    temp = np.r_[lead_in, temp]
    activities = rl.unit_stimulus_responses(temp, temp_mean, temp_std)
    return np.hstack(activities['t']) if 't' in activities else np.hstack(activities['m'])
def get_cell_responses_predictive(path, stimulus, std: core.GradientStandards, trained=True):
    """
    Loads a model and computes the temperature response of all neurons returning response matrix
    :param path: Model path
    :param stimulus: Temperature stimulus
    :param std: Input standardizations
    :param trained: If false load naive network otherwise trained
    :return: n-timepoints x m-neurons matrix of responses
    """
    mdata = core.ModelData(path)
    # create our model and load from checkpoint
    gpn = core.ZfGpNetworkModel()
    gpn.load(mdata.ModelDefinition, mdata.LastCheckpoint if trained else mdata.FirstCheckpoint)
    # prepend lead-in to stimulus
    lead_in = np.full(gpn.input_dims[2] - 1, np.asscalar(np.mean(stimulus[:10])))
    temp = np.r_[lead_in, stimulus]
    activities = gpn.unit_stimulus_responses(temp, None, None, std)
    return np.hstack(activities['t']) if 't' in activities else np.hstack(activities['m'])
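
# Hedged usage sketch (assumptions marked): pool the single-model response matrices returned
# by get_cell_responses_predictive into one timepoints x neurons matrix across models. The
# use of paths_512_zf / base_path_zf / std_zf mirrors names from elsewhere in this script;
# the stimulus argument "temp_stimulus" is a hypothetical placeholder.
def _example_pooled_responses(temp_stimulus, trained=True):
    all_responses = []
    for p in paths_512_zf:
        resp = get_cell_responses_predictive(mpath(base_path_zf, p), temp_stimulus, std_zf, trained)
        all_responses.append(resp)
    # concatenate neurons of all models along the column axis
    return np.hstack(all_responses)
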
fig, ax = pl.subplots()
sns.tsplot(all_units_turn[:, clust_ids_zf == int_off].T, kernel_time, n_boot=1000, color="C0", ax=ax)
sns.tsplot(all_units_straight[:, clust_ids_zf == int_off].T, kernel_time, n_boot=1000, color="C1", ax=ax)
ax.plot([kernel_time.min(), kernel_time.max()], [0, 0], 'k--', lw=0.25)
ax.plot([0, 0], [-0.001, 0.001], 'k--', lw=0.25)
ax.set_ylabel("Activation")
ax.set_xlabel("Time around bout [s]")
sns.despine(fig, ax)
fig.savefig(save_folder + "behav_triggered_IntOFF.pdf", format="pdf")

# panel 1 - white noise analysis on naive networks
behav_kernels = {}
k_names = ["stay", "straight", "left", "right"]
for p in paths_512_zf:
    m_path = mpath(base_path_zf, p)
    mdata_wn = c.ModelData(m_path)
    gpn_wn = mo.network_model()
    gpn_wn.load(mdata_wn.ModelDefinition, mdata_wn.FirstCheckpoint)
    wna = mo.wn_sim(std_zf, gpn_wn, stim_std=2)
    wna.switch_mean = 5
    wna.switch_std = 1
    kernels = wna.compute_behavior_kernels(10000000)
    for j, n in enumerate(k_names):
        if n in behav_kernels:
            behav_kernels[n].append(kernels[j])
        else:
            behav_kernels[n] = [kernels[j]]
kernel_time = np.linspace(-4, 1, behav_kernels['straight'][0].size)
for n in k_names:
    behav_kernels[n] = np.vstack(behav_kernels[n])
plot_kernels = {"straight": behav_kernels["straight"], "turn": (behav_kernels["left"] + behav_kernels["right"]) / 2}
# panel - input connectivity into second layer of t branch
conn_mat = np.zeros((8, 8, len(paths_512_ce)))
for i, p in enumerate(paths_512_ce):
    model_cids = clust_ids_ce[all_ids_ce[0, :] == i]
    layers_ids = all_ids_ce[1, :][all_ids_ce[0, :] == i]
    l_0_mask = np.full(8, False)
    ix = model_cids[layers_ids == 0]
    ix = ix[ix != -1]
    l_0_mask[ix] = True
    l_1_mask = np.full(8, False)
    ix = model_cids[layers_ids == 1]
    ix = ix[ix != -1]
    l_1_mask[ix] = True
    m_path = mpath(base_path_ce, p)
    mdata = c.ModelData(m_path)
    gpn = MoTypes(True).network_model()
    gpn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
    input_result = gpn.parse_layer_input_by_cluster('t', 1, model_cids[layers_ids == 0], model_cids[layers_ids == 1])
    for k, l0 in enumerate(np.arange(8)[l_0_mask]):
        for l, l1 in enumerate(np.arange(8)[l_1_mask]):
            conn_mat[l0, l1, i] = input_result[k, l]
# reordered version of conn_mat based on known types
cm_order = [1, 7, 0, 2, 3, 4, 5, 6]
cm_reordered = conn_mat[:, cm_order, :]
cm_reordered = cm_reordered[cm_order, :, :]
m = np.mean(cm_reordered, 2)
s = np.std(cm_reordered, 2)
cross_0 = np.sign((m + s) * (m - s)) <= 0
m[cross_0] = 0
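
# Hedged plotting sketch (not part of the original figure code): visualize the reordered
# mean connectivity matrix m computed above as a heatmap. Reading of the axes (rows =
# layer-0 source cluster, columns = layer-1 target cluster) follows the indexing
# conn_mat[l0, l1, i] above and is an assumption of this sketch.
def _example_plot_connectivity(mean_conn):
    fig, ax = pl.subplots()
    vmax = np.abs(mean_conn).max()
    im = ax.imshow(mean_conn, cmap="RdBu_r", vmin=-vmax, vmax=vmax)
    fig.colorbar(im, ax=ax, label="Mean input strength")
    ax.set_xlabel("Layer 1 cluster (reordered)")
    ax.set_ylabel("Layer 0 cluster (reordered)")
    return fig

# e.g. _example_plot_connectivity(m)
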
net_up = []
rl_turn_coherence = []  # store turn persistence
rl_da = []  # store turn angles

# Panel - gradient navigation performance - rl net and predictive network
sim_radius = 100
sim_min = 22
sim_max = 37
bns = np.linspace(0, sim_radius, 100)
centers = bns[:-1] + np.diff(bns) / 2  # bin centers
centers = centers / sim_radius * (sim_max - sim_min) + sim_min
naive_rl = np.zeros((20, centers.size))
trained_rl = np.zeros_like(naive_rl)
for i, p in enumerate(paths_rl):
    mdata = c.ModelData(mpath(base_path_rl, p))
    with c.ReinforcementLearningNetwork() as rl_net:
        rl_net.load(mdata.ModelDefinition, mdata.FirstCheckpoint)
        circ_train = CircleRLTrainer(rl_net, sim_radius, sim_min, sim_max, 26)
        circ_train.t_std = t_std
        circ_train.t_mean = t_mean
        circ_train.p_explore = 0.25  # try to match to exploration in predictive network (best taken 50% of time)
        naive_pos = circ_train.run_sim(GlobalDefs.n_steps, False)[0]
        rl_net.load(mdata.ModelDefinition, mdata.LastCheckpoint)
        circ_train = CircleRLTrainer(rl_net, sim_radius, sim_min, sim_max, 26)
        circ_train.t_std = t_std
        circ_train.t_mean = t_mean
        circ_train.p_explore = 0.25  # try to match to exploration in predictive network (best taken 50% of time)
        trained_pos, _, trained_behav = circ_train.run_sim(GlobalDefs.n_steps, False)
        rl_turn_coherence.append(a.turn_coherence(np.array(trained_behav), 10))
        rl_da.append(np.rad2deg(get_bout_da(trained_pos, get_bout_starts(trained_pos))))