def simulate_data(self, pPred_cond_rt_ch: torch.Tensor, seed=0, rt_only=False):
    torch.random.manual_seed(seed)
    if rt_only:
        # Keep the stored choices and resample only the RTs,
        # conditioned on each trial's condition and choice.
        dcond_tr = self.dcond_tr
        chSim_tr_dim = self.ch_tr_dim
        chSimFlat_tr = consts.ch_by_dim2ch_flat(chSim_tr_dim)
        pPred_tr_rt = pPred_cond_rt_ch[dcond_tr, :, chSimFlat_tr]
        rtSim_tr = npy(
            npt.categrnd(probs=npt.sumto1(pPred_tr_rt, -1)) * self.dt)
    else:
        # Sample (RT, choice) jointly from the flattened [rt x ch] grid,
        # then recover the RT frame and the flat choice from the flat index.
        dcond_tr = self.dcond_tr
        pPred_tr_rt_ch = pPred_cond_rt_ch[dcond_tr, :, :]
        n_tr, nt, n_ch = pPred_tr_rt_ch.shape
        chSim_tr_rt_ch = npy(
            npt.categrnd(probs=pPred_tr_rt_ch.reshape([n_tr, -1])))
        rtSim_tr = npy((chSim_tr_rt_ch // n_ch) * self.dt)
        chSim_tr = npy(chSim_tr_rt_ch % n_ch)
        chs = np.array(consts.CHS)
        chSim_tr_dim = np.stack(
            [chs[dim][chSim_tr] for dim in range(consts.N_DIM)], -1)
    self.update_data(ch_tr_dim=chSim_tr_dim, rt_tr=rtSim_tr)
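# Illustrative sketch (not from the original module): the joint sampling in
# the `else` branch above, written with torch.distributions.Categorical in
# place of npt.categrnd (assumed to behave similarly). The flat index over
# the [rt x ch] grid is decoded back into an RT bin and a flat choice.
def _demo_sample_rt_ch(p_tr_rt_ch: torch.Tensor, dt: float):
    """p_tr_rt_ch: [tr, rt, ch] probabilities; returns (rt_sec, ch_flat)."""
    n_tr, nt, n_ch = p_tr_rt_ch.shape
    # One flat index per trial; Categorical renormalizes probs internally.
    idx = torch.distributions.Categorical(
        probs=p_tr_rt_ch.reshape([n_tr, -1])).sample().numpy()
    rt_sec = (idx // n_ch) * dt  # row of the flat index = RT frame
    ch_flat = idx % n_ch  # column of the flat index = flat choice
    return rt_sec, ch_flat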
def dat2p_dat(
        self, ch_tr_dim: np.ndarray, dur_tr: np.ndarray,
        ev_tr_dim: np.ndarray
) -> (torch.Tensor, torch.Tensor, np.ndarray, np.ndarray, np.ndarray,
      np.ndarray):
    """
    :param ch_tr_dim: [tr, dim]
    :param dur_tr: [tr]
    :param ev_tr_dim: [tr, dim]
    :return: n_cond_dur_ch[cond, dur, ch],
        ev_cond_fr_dim_meanvar[dcond, fr, dim, (mean, var)],
        ev_cond_dim[dcond, dim], dcond_tr[tr], durs[dur], ddur_tr[tr]
    """
    nt0 = self.nt0
    dt0 = self.dt0
    n_ch_flat = self.n_ch
    subsample_factor = self.subsample_factor

    nt = int(nt0 // subsample_factor)

    durs, ddur_tr = np.unique(dur_tr, return_inverse=True)
    ddur_tr = ddur_tr.astype(np.int64)  # np.int was removed from NumPy
    n_dur = len(durs)
    durs = torch.tensor(durs)
    ddur_tr = torch.tensor(ddur_tr, dtype=torch.long)

    ch_tr_flat = consts.ch_by_dim2ch_flat(ch_tr_dim)

    ev_cond_dim, dcond_tr = np.unique(ev_tr_dim, return_inverse=True, axis=0)
    n_cond_flat = len(ev_cond_dim)
    ev_cond_fr_dim = torch.tensor(ev_cond_dim)[:, None, :].expand(
        [-1, nt, -1])
    ev_cond_fr_dim_meanvar = torch.stack(
        [ev_cond_fr_dim, torch.zeros_like(ev_cond_fr_dim)], -1)

    n_cond_dur_ch = npt.tensor(
        npg.aggregate(np.stack([dcond_tr, npy(ddur_tr), ch_tr_flat]),
                      1., 'sum', [n_cond_flat, n_dur, n_ch_flat]))

    return n_cond_dur_ch, ev_cond_fr_dim_meanvar, ev_cond_dim, dcond_tr, \
        durs, ddur_tr
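# Illustrative sketch (not from the original module): np.unique with
# return_inverse, as used above, gives each trial the index of its unique
# condition row, so ev_cond_dim[dcond_tr] reconstructs ev_tr_dim exactly.
def _demo_unique_conds(ev_tr_dim: np.ndarray):
    ev_cond_dim, dcond_tr = np.unique(ev_tr_dim, return_inverse=True, axis=0)
    # Flatten in case the installed NumPy returns the inverse with an
    # extra axis when `axis=` is given.
    dcond_tr = dcond_tr.reshape(-1)
    assert np.array_equal(ev_cond_dim[dcond_tr], ev_tr_dim)
    return ev_cond_dim, dcond_tr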
def dat2p_dat(
        self, ch_tr_dim: np.ndarray, rt_sec: np.ndarray,
        ev_tr_dim: np.ndarray,
) -> (torch.Tensor, torch.Tensor, np.ndarray, np.ndarray):
    """
    :param ch_tr_dim: [tr, dim]
    :param rt_sec: [tr]
    :param ev_tr_dim: [tr, dim]
    :return: n_cond_rt_ch[cond, rt, ch],
        ev_cond_fr_dim_meanvar[dcond, fr, dim, (mean, var)],
        ev_cond_dim[dcond, dim], dcond_tr[tr]
    """
    nt0 = self.nt0
    dt0 = self.dt0
    n_ch_flat = self.n_ch
    subsample_factor = self.subsample_factor

    nt = int(nt0 // subsample_factor)
    dt = dt0 * subsample_factor

    drt = rt_sec2fr(rt_sec=rt_sec, dt=dt, nt=nt)
    ch_flat = consts.ch_by_dim2ch_flat(ch_tr_dim)

    ev_cond_dim, dcond_tr = unique_conds(ev_tr_dim)
    n_cond_flat = len(ev_cond_dim)
    ev_cond_fr_dim = torch.tensor(ev_cond_dim)[:, None, :].expand(
        [-1, nt, -1])
    ev_cond_fr_dim_meanvar = torch.stack(
        [ev_cond_fr_dim, torch.zeros_like(ev_cond_fr_dim)], -1)

    # np.long was removed from NumPy; use a concrete integer type instead.
    n_cond_rt_ch = torch.tensor(
        npg.aggregate(np.stack([dcond_tr, drt, ch_flat.astype(np.int64)]),
                      1., 'sum', [n_cond_flat, nt, n_ch_flat]))

    return n_cond_rt_ch, ev_cond_fr_dim_meanvar, ev_cond_dim, dcond_tr
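# Illustrative sketch (not from the original module): the npg.aggregate call
# above is a multi-dimensional bincount over (condition, RT frame, choice).
# The same count table can be built with np.add.at, e.g. when numpy_groupies
# is unavailable.
def _demo_count_cond_rt_ch(dcond_tr, drt_tr, chFlat_tr, n_cond, nt, n_ch):
    """All inputs are integer [tr] arrays; returns counts[cond, rt, ch]."""
    n = np.zeros([n_cond, nt, n_ch])
    np.add.at(n, (dcond_tr, drt_tr, chFlat_tr), 1.)  # one count per trial
    return torch.tensor(n)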
def plot_p_tnd1(model, d=None, data_mode=None):
    fig = plt.figure('p_tnd', figsize=[4, 3.5])
    gs = plt.GridSpec(
        nrows=2, ncols=2,
        left=0.2, right=0.95, bottom=0.25, top=0.95,
    )
    for ch0 in range(consts.N_CH):
        for ch1 in range(consts.N_CH):
            ch_flat = consts.ch_by_dim2ch_flat(np.array([ch0, ch1]))
            ax = plt.subplot(gs[ch1, ch0])  # type: plt.Axes # noqa
            model.tnds[ch_flat].plot_p_tnd()
            ax.set_ylim(top=1)
            ax.set_yticks([0, 1])
            if ch1 == 0:
                ax.set_xticklabels([])
                ax.set_xlabel('')
            if ch0 == 1:
                ax.set_yticklabels([])
                ax.set_ylabel('')
    return fig, d
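# Illustrative usage (hedged; `model_fit` and the file name are assumptions):
# plot_p_tnd1 expects a fitted model whose model.tnds[ch_flat] entries expose
# plot_p_tnd(), and it returns the figure plus the (unused) `d` passthrough.
# fig, _ = plot_p_tnd1(model_fit)
# fig.savefig('p_tnd.png', dpi=300)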
def plot_fit_combined(
        data: Union[sim2d.Data2DRT, dict] = None,
        pModel_cond_rt_chFlat=None,
        model=None,
        pModel_dimRel_condDense_chFlat=None,
        # --- in place of data:
        pAll_cond_rt_chFlat=None, evAll_cond_dim=None,
        pTrain_cond_rt_chFlat=None, evTrain_cond_dim=None,
        pTest_cond_rt_chFlat=None, evTest_cond_dim=None,
        dt=None,
        # --- optional
        ev_dimRel_condDense_fr_dim_meanvar=None,
        dt_model=None,
        to_plot_internals=True,
        to_plot_params=True,
        to_plot_choice=True,
        # to_group_irr=False,
        group_dcond_irr=None,
        to_combine_ch_irr_cond=True,
        kw_plot_pred=(),
        kw_plot_pred_ch=(),
        kw_plot_data=(),
        axs=None,
):
    """
    :param data:
    :param pModel_cond_rt_chFlat:
    :param model:
    :param pModel_dimRel_condDense_chFlat:
    :param ev_dimRel_condDense_fr_dim_meanvar:
    :param to_plot_internals:
    :param to_plot_params:
    :param group_dcond_irr:
    :param to_combine_ch_irr_cond:
    :param kw_plot_pred:
    :param kw_plot_data:
    :param axs:
    :return:
    """
    if data is None:
        if pTrain_cond_rt_chFlat is None:
            pTrain_cond_rt_chFlat = pAll_cond_rt_chFlat
        if evTrain_cond_dim is None:
            evTrain_cond_dim = evAll_cond_dim
        if pTest_cond_rt_chFlat is None:
            pTest_cond_rt_chFlat = pAll_cond_rt_chFlat
        if evTest_cond_dim is None:
            evTest_cond_dim = evAll_cond_dim
    else:
        _, pAll_cond_rt_chFlat, _, _, evAll_cond_dim = \
            data.get_data_by_cond('all')
        _, pTrain_cond_rt_chFlat, _, _, evTrain_cond_dim = \
            data.get_data_by_cond('train_valid', mode_train='easiest')
        _, pTest_cond_rt_chFlat, _, _, evTest_cond_dim = \
            data.get_data_by_cond('test', mode_train='easiest')
        dt = data.dt

    hs = {}

    if model is None:
        assert not to_plot_internals
        assert not to_plot_params

    if dt_model is None:
        if model is None:
            dt_model = dt
        else:
            dt_model = model.dt

    if axs is None:
        if to_plot_params:
            axs = plt2.GridAxes(3, 3)
        else:
            if to_plot_internals:
                axs = plt2.GridAxes(3, 3)
            else:
                if to_plot_choice:
                    axs = plt2.GridAxes(2, 2)
                else:
                    axs = plt2.GridAxes(1, 2)  # TODO: beautify ratios

    rts = []
    hs['rt'] = []
    for dim_rel in range(consts.N_DIM):
        # --- data_pred may not have all conditions, so concatenate the rest
        #  of the conditions so that the color scale is correct. Then also
        #  concatenate pTest_cond_rt_chFlat with zeros so that nothing is
        #  plotted for the concatenated conditions.
        evTest_cond_dim1 = np.concatenate([
            evTest_cond_dim, evAll_cond_dim
        ], axis=0)
        pTest_cond_rt_chFlat1 = np.concatenate([
            pTest_cond_rt_chFlat, np.zeros_like(pAll_cond_rt_chFlat)
        ], axis=0)

        if ev_dimRel_condDense_fr_dim_meanvar is None:
            evModel_cond_dim = evAll_cond_dim
        else:
            if ev_dimRel_condDense_fr_dim_meanvar.ndim == 5:
                evModel_cond_dim = npy(ev_dimRel_condDense_fr_dim_meanvar[
                    dim_rel][:, 0, :, 0])
            else:
                assert ev_dimRel_condDense_fr_dim_meanvar.ndim == 4
                evModel_cond_dim = npy(ev_dimRel_condDense_fr_dim_meanvar[
                    dim_rel][:, 0, :])
        pModel_cond_rt_chFlat = npy(pModel_dimRel_condDense_chFlat[dim_rel])

        if to_plot_choice:
            # --- Plot choice
            ax = axs[0, dim_rel]
            plt.sca(ax)

            if to_combine_ch_irr_cond:
                ev_cond_model1, p_rt_ch_model1 = combine_irr_cond(
                    dim_rel, evModel_cond_dim, pModel_cond_rt_chFlat)
                sim2d.plot_p_ch_vs_ev(
                    ev_cond_model1, p_rt_ch_model1,
                    dim_rel=dim_rel, style='pred',
                    group_dcond_irr=None,
                    kw_plot=kw_plot_pred_ch,
                    cmap=lambda n: lambda v: [0., 0., 0.],
                )
            else:
                sim2d.plot_p_ch_vs_ev(
                    evModel_cond_dim, pModel_cond_rt_chFlat,
                    dim_rel=dim_rel, style='pred',
                    group_dcond_irr=group_dcond_irr,
                    kw_plot=kw_plot_pred,
                    cmap=cmaps[dim_rel],
                )

            # Use a local name so the handle dict `hs` is not clobbered.
            hs_ch, conds_irr = sim2d.plot_p_ch_vs_ev(
                evTest_cond_dim1, pTest_cond_rt_chFlat1,
                dim_rel=dim_rel, style='data_pred',
                group_dcond_irr=group_dcond_irr,
                cmap=cmaps[dim_rel],
                kw_plot=kw_plot_data,
            )
            hs1 = [h[0] for h in hs_ch]
            odim = 1 - dim_rel
            odim_name = consts.DIM_NAMES_LONG[odim]
            legend_odim(conds_irr, hs1, odim_name)

            sim2d.plot_p_ch_vs_ev(
                evTrain_cond_dim, pTrain_cond_rt_chFlat,
                dim_rel=dim_rel, style='data_fit',
                group_dcond_irr=group_dcond_irr,
                cmap=cmaps[dim_rel],
                kw_plot=kw_plot_data,
            )
            plt2.detach_axis('x', np.amin(evTrain_cond_dim[:, dim_rel]),
                             np.amax(evTrain_cond_dim[:, dim_rel]))
            ax.set_xlabel('')
            ax.set_xticklabels([])
            if dim_rel != 0:
                plt2.box_off(['left'])
                plt.yticks([])
            ax.set_ylabel('P(%s choice)' % consts.CH_NAMES[dim_rel][1])

        # --- Plot RT
        ax = axs[int(to_plot_choice) + 0, dim_rel]
        plt.sca(ax)
        hs1, rts1 = sim2d.plot_rt_vs_ev(
            evModel_cond_dim, pModel_cond_rt_chFlat,
            dim_rel=dim_rel, style='pred',
            group_dcond_irr=group_dcond_irr,
            dt=dt_model,
            kw_plot=kw_plot_pred,
            cmap=cmaps[dim_rel],
        )
        hs['rt'].append(hs1)
        rts.append(rts1)

        sim2d.plot_rt_vs_ev(
            evTest_cond_dim1, pTest_cond_rt_chFlat1,
            dim_rel=dim_rel, style='data_pred',
            group_dcond_irr=group_dcond_irr,
            dt=dt,
            cmap=cmaps[dim_rel],
            kw_plot=kw_plot_data,
        )
        sim2d.plot_rt_vs_ev(
            evTrain_cond_dim, pTrain_cond_rt_chFlat,
            dim_rel=dim_rel, style='data_fit',
            group_dcond_irr=group_dcond_irr,
            dt=dt,
            cmap=cmaps[dim_rel],
            kw_plot=kw_plot_data,
        )
        plt2.detach_axis('x', np.amin(evTrain_cond_dim[:, dim_rel]),
                         np.amax(evTrain_cond_dim[:, dim_rel]))
        if dim_rel != 0:
            ax.set_ylabel('')
            plt2.box_off(['left'])
            plt.yticks([])
        ax.set_xlabel(consts.DIM_NAMES_LONG[dim_rel].lower() + ' strength')
        if dim_rel == 0:
            ax.set_ylabel('RT (s)')

        if to_plot_internals:
            for ch1 in range(consts.N_CH):
                ch0 = dim_rel
                ax = axs[3 + ch1, dim_rel]
                plt.sca(ax)
                ch_flat = consts.ch_by_dim2ch_flat(np.array([ch0, ch1]))
                model.tnds[ch_flat].plot_p_tnd()
                ax.set_xlabel('')
                ax.set_xticklabels([])
                ax.set_yticks([0, 1])
                if ch0 > 0:
                    ax.set_yticklabels([])
                ax.set_ylabel(r'$\mathrm{P}(T^\mathrm{n} \mid'
                              r' \mathbf{z}=[%d,%d])$' % (ch0, ch1))

            ax = axs[5, dim_rel]
            plt.sca(ax)
            if hasattr(model.dtb, 'dtb1ds'):
                model.dtb.dtb1ds[dim_rel].plot_bound(color='k')

    plt2.sameaxes(axs[-1, :consts.N_DIM], xy='y')

    if to_plot_params:
        ax = axs[0, -1]
        plt.sca(ax)
        model.plot_params()

    return axs, rts, hs
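# Illustrative usage (hedged; variable names are assumptions, not from the
# original code): when no model object is available, pass the per-dimension
# prediction tables directly and disable the internals/params panels.
# axs, rts, hs = plot_fit_combined(
#     data=data_2d_rt,  # a sim2d.Data2DRT instance
#     pModel_dimRel_condDense_chFlat=p_pred,  # [dim_rel][cond, rt, ch_flat]
#     to_plot_internals=False,
#     to_plot_params=False,
# )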