def plot_weights(ax, Coefs, prds=None, xlim=None, xlab=tlab,
                 ylab='unit coefficient', title='', ytitle=1.04):
    """Plot decoding weights."""

    # Unstack dataframe with results.
    lCoefs = pd.DataFrame(Coefs.unstack().unstack(), columns=['coef'])
    lCoefs['time'] = lCoefs.index.get_level_values(0)
    lCoefs['value'] = lCoefs.index.get_level_values(1)
    lCoefs['uid'] = lCoefs.index.get_level_values(2)
    lCoefs.index = np.arange(len(lCoefs.index))

    # Plot time series.
    sns.tsplot(lCoefs, time='time', value='coef', unit='value',
               condition='uid', ax=ax)

    # Add chance level line and stimulus periods.
    putil.add_chance_level(ax=ax, ylevel=0)
    putil.plot_periods(prds, ax=ax)

    # Set axis limits.
    xlim = xlim if xlim is not None else tlim
    putil.set_limits(ax, xlim)

    # Format plot.
    putil.set_labels(ax, xlab, ylab, title, ytitle)
    putil.hide_legend(ax)

def plot_prd_spike_count(spk_cnt, prd, sps, fig):
    """Plot spike count results for period."""

    # Init axes.
    ax_total, ax_per_tr, ax_mean = putil.sps_add_axes(fig, sps, 1, 3)

    # Plot total spike count histogram.
    plot_spike_count_hist(spk_cnt.unstack(), prd + ', all trials', ax_total)

    # Plot per-trial spike count histograms.
    cols = sns.color_palette('Spectral', len(spk_cnt.index))
    for (itr, nspk), col in zip(spk_cnt.iterrows(), cols):
        plot_spike_count_hist(nspk, prd + ', per trial', ax_per_tr,
                              False, 1.0, col, 1)

    # Plot median spike count per trial across the session.
    x = spk_cnt.index
    y = spk_cnt.median(1)
    pplot.lines(x, y, xlim=[x.min(), x.max()], xlab='trial index',
                ylab='median spike count', title='median over time',
                ax=ax_mean)

    # Format figures.
    for ax in (ax_total, ax_per_tr):
        putil.set_limits(ax, xlim=[-0.5, None], ylim=[0, None])

    return ax_total, ax_per_tr, ax_mean

def plot_mean_rates(mRates, aa_res_dir, tasks=None, task_lbls=None,
                    baseline=None, xlim=None, ylim=None, ci=68,
                    ffig=None, figsize=(6, 4)):
    """Plot mean rates across tasks."""

    # Init.
    if tasks is None:
        tasks = mRates.keys()

    # Plot mean activity.
    lRates = []
    for task in tasks:
        lrates = pd.DataFrame(mRates[task].unstack(), columns=['rate'])
        lrates['task'] = task
        lRates.append(lrates)
    lRates = pd.concat(lRates)
    lRates['time'] = lRates.index.get_level_values(0)
    lRates['unit'] = lRates.index.get_level_values(1)
    if task_lbls is not None:
        lRates.task.replace(task_lbls, inplace=True)

    # Plot as time series.
    # putil.set_style('notebook', 'white')
    fig = putil.figure(figsize=figsize)
    ax = putil.axes()
    sns.tsplot(lRates, time='time', value='rate', unit='unit',
               condition='task', ci=ci, ax=ax)

    # Add periods and baseline.
    putil.plot_periods(ax=ax)
    if baseline is not None:
        putil.add_baseline(baseline, ax=ax)

    # Format plot.
    sns.despine(ax=ax)
    putil.set_labels(ax, xlab='time since S1 onset', ylab='rate (sp/s)')
    putil.set_limits(ax, xlim, ylim)
    putil.hide_legend_title(ax)

    # Save plot.
    putil.save_fig(ffig, fig)

    return fig, ax

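# Example usage of plot_mean_rates (a minimal sketch, not part of the original
# module; it assumes mRates maps each task name to a units x time-bins
# DataFrame, as implied by the unstacking above, and that the module-level
# imports are available):
#
#     tvec = np.arange(-1000, 3001, 50)      # time bins (ms)
#     rng = np.random.RandomState(0)
#     mRates = {task: pd.DataFrame(20 * rng.rand(10, len(tvec)), columns=tvec)
#               for task in ('task A', 'task B')}
#     fig, ax = plot_mean_rates(mRates, aa_res_dir=None, xlim=[-1000, 3000])
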
def raster(spk_trains, t_unit=ms, prds=None, c='b', xlim=None,
           title=None, xlab=None, ylab=None, ffig=None, ax=None):
    """Plot rasterplot."""

    # Init.
    ax = putil.axes(ax)

    putil.plot_periods(prds, ax=ax)
    putil.set_limits(ax, xlim)

    # There's nothing to plot.
    if not len(spk_trains):
        return ax

    # Plot raster.
    for i, spk_tr in enumerate(spk_trains):
        x = np.array(spk_tr.rescale(t_unit))
        y = (i + 1) * np.ones_like(x)

        # Spike markers are plotted in absolute size (figure coordinates).
        # ax.scatter(x, y, c=c, s=1.8, edgecolor=c, marker='|')

        # Spike markers are plotted in relative size (axis coordinates).
        patches = [Rectangle((xi - wsp/2, yi - hsp/2), wsp, hsp)
                   for xi, yi in zip(x, y)]
        collection = PatchCollection(patches, facecolor=c, edgecolor=c)
        ax.add_collection(collection)

    # Format plot.
    ylim = [0.5, len(spk_trains) + 0.5] if len(spk_trains) else [0, 1]
    if xlab is not None:
        xlab = putil.t_lbl.format(xlab)
    putil.format_plot(ax, xlim, ylim, xlab, ylab, title)
    putil.hide_axes(ax, show_x=True)
    putil.hide_spines(ax)

    # Order trials from top to bottom, only after setting axis limits.
    ax.invert_yaxis()

    # Save and return plot.
    putil.save_fig(ffig)
    return ax

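# Example usage of raster (a minimal sketch, not part of the original module;
# it assumes `ms` is the quantities millisecond unit imported at module level
# and that the wsp/hsp spike-marker size constants are defined there):
#
#     import quantities as pq
#     rng = np.random.RandomState(0)
#     spk_trains = [np.sort(rng.uniform(0, 2000, 30)) * pq.ms
#                   for _ in range(15)]
#     ax = raster(spk_trains, t_unit=pq.ms, xlim=[0, 2000],
#                 xlab='time', ylab='trial')
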
def plot_RF_results(RF_res, stims, fdir, sup_title):
    """Plot receptive field results."""

    # Plot distribution of coverage values.
    putil.set_style('poster', 'white')
    fig = putil.figure()
    ax = putil.axes()
    sns.distplot(RF_res.S1_cover, bins=np.arange(0, 1.01, 0.1),
                 kde=False, rug=True, ax=ax)
    putil.set_limits(ax, [0, 1])
    fst = util.format_to_fname(sup_title)
    ffig = fdir + fst + '_S1_coverage.png'
    putil.save_fig(ffig, fig, sup_title)

    # Plot RF coverage and rate during S1 on regression plot for each
    # recording and task.
    tasks = RF_res.index.get_level_values(-1).unique()
    for vname, ylim in [('mean_rate', [0, None]), ('max_rate', [0, None]),
                        ('mDSI', [0, 1])]:
        fig, gs, axes = putil.get_gs_subplots(nrow=len(stims),
                                              ncol=len(tasks),
                                              subw=4, subh=4,
                                              ax_kws_list=None,
                                              create_axes=True)
        colors = sns.color_palette('muted', len(tasks))
        for istim, stim in enumerate(stims):
            for itask, task in enumerate(tasks):

                # Plot regression plot.
                ax = axes[istim, itask]
                scov, sval = [stim + '_' + name for name in ('cover', vname)]
                df = RF_res.xs(task, level=-1)
                sns.regplot(scov, sval, df, color=colors[itask], ax=ax)

                # Add unit labels.
                uids = df.index.droplevel(0)
                putil.add_unit_labels(ax, uids, df[scov], df[sval])

                # Add stats.
                r, p = sp.stats.pearsonr(df[sval], df[scov])
                pstr = util.format_pvalue(p)
                txt = 'r = {:.2f}, {}'.format(r, pstr)
                ax.text(0.02, 0.98, txt, va='top', ha='left',
                        transform=ax.transAxes)

                # Set labels.
                title = '{} {}'.format(task, stim)
                xlab, ylab = [sn.replace('_', ' ') for sn in (scov, sval)]
                putil.set_labels(ax, xlab, ylab, title)

                # Set limits.
                xlim = [0, 1]
                putil.set_limits(ax, xlim, ylim)

        # Save plot.
        fst = util.format_to_fname(sup_title)
        fname = '{}_cover_{}.png'.format(fst, vname)
        ffig = util.join([fdir, vname, fname])
        putil.save_fig(ffig, fig, sup_title)

def plot_ROC_mean(d_faroc, t1=None, t2=None, ylim=None, colors=None,
                  ylab='AROC', ffig=None):
    """Plot mean ROC curves over given period."""

    # Import results.
    d_aroc = {}
    for name, faroc in d_faroc.items():
        aroc = util.read_objects(faroc, 'aroc')
        d_aroc[name] = aroc.unstack().T

    # Format results.
    laroc = pd.DataFrame(pd.concat(d_aroc), columns=['aroc'])
    laroc['task'] = laroc.index.get_level_values(0)
    laroc['time'] = laroc.index.get_level_values(1)
    laroc['unit'] = laroc.index.get_level_values(2)
    laroc.index = np.arange(len(laroc.index))

    # Init figure.
    fig = putil.figure(figsize=(6, 6))
    ax = sns.tsplot(laroc, time='time', value='aroc', unit='unit',
                    condition='task', color=colors)

    # Highlight stimulus periods.
    putil.plot_periods(ax=ax)

    # Plot mean results.
    for line in ax.lines:
        line.set_linewidth(3)

    # Add chance level line.
    putil.add_chance_level(ax=ax, alpha=0.8, color='k')
    ax.lines[-1].set_linewidth(1.5)

    # Format plot.
    xlab = 'Time since S1 onset (ms)'
    putil.set_labels(ax, xlab, ylab)
    putil.set_limits(ax, [t1, t2], ylim)
    putil.set_spines(ax, bottom=True, left=True, top=False, right=False)
    putil.set_legend(ax, loc=0)

    # Save plot.
    putil.save_fig(ffig, fig, ytitle=1.05, w_pad=15)

def plot_scores(ax, Scores, Perm=None, Psdo=None, nvals=None, prds=None,
                col='b', perm_col='grey', psdo_col='g', xlim=None,
                ylim=ylim_scr, xlab=tlab, ylab=ylab_scr, title='',
                ytitle=1.04):
    """Plot decoding accuracy results."""

    lgn_patches = []

    # Plot permuted results (if exist).
    if not util.is_null(Perm) and not Perm.isnull().all().all():
        x, pval = Perm.columns, Perm.loc['pval']
        ymean, ystd = Perm.loc['mean'], Perm.loc['std']
        plot_mean_std_sdiff(x, ymean, ystd, pval, pth=0.01, lw=6,
                            color=perm_col, ax=ax)
        lgn_patches.append(putil.get_artist('permuted', perm_col))

    # Plot population shuffled results (if exist).
    if not util.is_null(Psdo) and not Psdo.isnull().all().all():
        x, pval = Psdo.columns, Psdo.loc['pval']
        ymean, ystd = Psdo.loc['mean'], Psdo.loc['std']
        plot_mean_std_sdiff(x, ymean, ystd, pval, pth=0.01, lw=3,
                            color=psdo_col, ax=ax)
        lgn_patches.append(putil.get_artist('pseudo-population', psdo_col))

    # Plot scores.
    plot_score_set(Scores, ax, color=col)
    lgn_patches.append(putil.get_artist('synchronous', col))

    # Add legend.
    lgn_patches = lgn_patches[::-1]
    putil.set_legend(ax, handles=lgn_patches)

    # Add chance level line.
    # This currently plots all nvals combined across stimulus period!
    if nvals is not None:
        chance_lvl = 1.0 / nvals
        putil.add_chance_level(ax=ax, ylevel=chance_lvl)

    # Add stimulus periods.
    if prds is not None:
        putil.plot_periods(prds, ax=ax)

    # Set axis limits.
    xlim = xlim if xlim is not None else tlim
    putil.set_limits(ax, xlim, ylim)

    # Format plot.
    putil.set_labels(ax, xlab, ylab, title, ytitle)

def sign_scatter(v1, v2, pvals=None, pth=0.01, scol='g', nscol='k',
                 id_line=False, fit_reg=False, ax=None):
    """Plot scatter plot with significant points highlighted."""

    # Init.
    ax = putil.axes(ax)
    s_pars = (True, scol, {'alpha': 1.0})
    ns_pars = (False, nscol, {'alpha': 0.8})

    # Binarize significance stats.
    vsig = (pvals < pth if pvals is not None
            else pd.Series(True, index=v1.index))

    # Plot significant and non-significant points.
    for b, c, a in [ns_pars, s_pars]:
        if (vsig == b).any():
            sns.regplot(v1.loc[vsig == b], v2.loc[vsig == b],
                        fit_reg=fit_reg, color=c, scatter_kws=a, ax=ax)

    # Format plot.
    sns.despine(ax=ax)

    # Add identity line.
    if id_line:
        v_max = max(ax.get_xlim()[1], ax.get_ylim()[1])
        putil.set_limits(ax, [0, v_max], [0, v_max])
        putil.add_identity_line(ax=ax, equal_xy=True)

    return ax

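# Example usage of sign_scatter (a minimal sketch, not part of the original
# module; v1, v2 and pvals are pandas Series sharing the same index):
#
#     rng = np.random.RandomState(0)
#     v1 = pd.Series(rng.rand(50))
#     v2 = pd.Series(v1 + 0.2 * rng.randn(50))
#     pvals = pd.Series(rng.rand(50), index=v1.index)
#     ax = sign_scatter(v1, v2, pvals, pth=0.05, id_line=True)
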
def rate(rate_list, names=None, prds=None, evts=None, cols=None,
         baseline=None, pval=0.05, test='mann_whitney_u', test_kws=None,
         xlim=None, ylim=None, title=None, xlab=None, ylab=putil.FR_lbl,
         add_lgn=True, lgn_lbl='trs', ffig=None, ax=None):
    """Plot firing rate."""

    # Init.
    ax = putil.axes(ax)
    if test_kws is None:
        test_kws = dict()

    # Plot periods and baseline first.
    putil.plot_periods(prds, ax=ax)
    if baseline is not None:
        putil.add_baseline(baseline, ax=ax)
    putil.set_limits(ax, xlim)

    if not len(rate_list):
        return ax

    if cols is None:
        cols = putil.get_colors(as_cycle=False)
    if names is None:
        names = len(rate_list) * ['']

    # Iterate through list of rate arrays.
    xmin, xmax, ymax = None, None, None
    for i, rts in enumerate(rate_list):

        # Init.
        name = names[i]
        col = cols[i]

        # Skip empty array (no trials).
        if not rts.shape[0]:
            continue

        # Set line label. Convert to Numpy array to format floats nicely.
        lbl = str(np.array(name)) if util.is_iterable(name) else str(name)
        if lgn_lbl is not None:
            lbl += ' ({} {})'.format(rts.shape[0], lgn_lbl)

        # Plot mean +- SEM of rate vectors.
        tvec, meanr, semr = rts.columns, rts.mean(), rts.sem()
        ax.plot(tvec, meanr, label=lbl, color=col)
        ax.fill_between(tvec, meanr - semr, meanr + semr, alpha=0.2,
                        facecolor=col, edgecolor=col)

        # Update limits.
        tmin, tmax, rmax = tvec.min(), tvec.max(), (meanr + semr).max()
        xmin = np.min([xmin, tmin]) if xmin is not None else tmin
        xmax = np.max([xmax, tmax]) if xmax is not None else tmax
        ymax = np.max([ymax, rmax]) if ymax is not None else rmax

    # Set ticks, labels and axis limits.
    if xlim is None:
        if xmin == xmax:  # avoid setting identical limits
            xmax = None
        xlim = (xmin, xmax)
    if ylim is None:
        ymax = 1.02 * ymax if (ymax is not None) and (ymax > 0) else None
        ylim = (0, ymax)
    if xlab is not None:
        xlab = putil.t_lbl.format(xlab)
    putil.format_plot(ax, xlim, ylim, xlab, ylab, title)
    t1, t2 = ax.get_xlim()  # in case it was set to None
    tmarks, tlbls = putil.get_tick_marks_and_labels(t1, t2)
    putil.set_xtick_labels(ax, tmarks, tlbls)
    putil.set_max_n_ticks(ax, 7, 'y')

    # Add legend.
    if add_lgn and len(rate_list):
        putil.set_legend(ax, loc=1, borderaxespad=0.0, handletextpad=0.4,
                         handlelength=0.6)

    # Add significance line to top of axes.
    if (pval is not None) and (len(rate_list) == 2):
        rates1, rates2 = rate_list
        sign_prds = stats.sign_periods(rates1, rates2, pval, test,
                                       **test_kws)
        putil.plot_signif_prds(sign_prds, color='m', linewidth=4.0, ax=ax)

    # Plot event markers.
    putil.plot_event_markers(evts, ax=ax)

    # Save and return plot.
    putil.save_fig(ffig)
    return ax

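# Example usage of rate (a minimal sketch, not part of the original module;
# each entry of rate_list is a trials x time-bins DataFrame, as implied by the
# use of .columns / .mean() / .sem() above; pval=None skips the significance
# test so no statistics helpers are exercised):
#
#     tvec = np.arange(-1000, 3001, 10)       # time bins (ms)
#     rng = np.random.RandomState(0)
#     rates1 = pd.DataFrame(rng.poisson(5, (20, len(tvec))), columns=tvec)
#     rates2 = pd.DataFrame(rng.poisson(8, (25, len(tvec))), columns=tvec)
#     ax = rate([rates1, rates2], names=['cond A', 'cond B'], pval=None)
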
def plot_group_violin(res, x, y, groups=None, npval=None, pth=0.01,
                      color='grey', ylim=None, ylab=None, ffig=None):
    """Plot group-wise results on violin plots."""

    if groups is None:
        groups = res['group'].unique()

    # Test difference from zero in each group.
    ttest_res = {group: sp.stats.ttest_1samp(gres[y], 0)
                 for group, gres in res.groupby(x)}
    ttest_res = pd.DataFrame.from_dict(ttest_res, 'index')

    # Binarize significance test.
    res['is_sign'] = res[npval] < pth if npval is not None else True
    res['direction'] = np.sign(res[y])

    # Set up figure and plot data.
    fig = putil.figure()
    ax = putil.axes()
    putil.add_baseline(ax=ax)
    sns.violinplot(x=x, y=y, data=res, inner=None, order=groups, ax=ax)
    sns.swarmplot(x=x, y=y, hue='is_sign', data=res, color=color,
                  order=groups, hue_order=[True, False], ax=ax)
    putil.set_labels(ax, xlab='', ylab=ylab)
    putil.set_limits(ax, ylim=ylim)
    putil.hide_legend(ax)

    # Add annotations.
    ymin, ymax = ax.get_ylim()
    ylvl = ymax
    for i, group in enumerate(groups):
        gres = res.loc[res.group == group]

        # Mean.
        mean_str = 'Mean:\n' if i == 0 else '\n'
        mean_str += '{:.2f}'.format(gres[y].mean())

        # Non-zero test of distribution.
        str_pval = util.format_pvalue(ttest_res.loc[group, 'pvalue'])
        mean_str += '\n({})'.format(str_pval)

        # Stats on difference from baseline.
        nnonsign, ntot = (~gres.is_sign).sum(), len(gres)
        npos, nneg = [sum(gres.is_sign & (gres.direction == d))
                      for d in (1, -1)]
        sign_to_report = [('+', npos), ('=', nnonsign), ('-', nneg)]
        nsign_str = ''
        for symb, n in sign_to_report:
            prc = str(int(round(100 * n / ntot)))
            nsign_str += '{} {:>3} / {} ({:>2}%)\n'.format(symb, int(n),
                                                           ntot, prc)

        lbl = '{}\n\n{}'.format(mean_str, nsign_str)
        ax.text(i, ylvl, lbl, fontsize='smaller', va='bottom', ha='center')

    # Save plot.
    putil.save_fig(ffig, fig)

    return fig, ax

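# Example usage of plot_group_violin (a minimal sketch, not part of the
# original module; res needs a 'group' column plus the value and p-value
# columns referenced above):
#
#     rng = np.random.RandomState(0)
#     res = pd.DataFrame({'group': np.repeat(['A', 'B'], 50),
#                         'effect': rng.randn(100),
#                         'pval': rng.rand(100)})
#     fig, ax = plot_group_violin(res, x='group', y='effect', npval='pval',
#                                 pth=0.05, ylab='effect size')
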
def plot_combined_rec_mean(recs, stims, res_dir, par_kws, list_n_most_DS,
                           list_min_nunits, n_boot=1e4, ci=95, tasks=None,
                           task_labels=None, add_title=True, fig=None):
    """Test and plot results combined across sessions."""

    # Init.
    # putil.set_style('notebook', 'ticks')
    vkey = 'all'   # This should be made more explicit!
    prds = [[stim] + list(constants.fixed_tr_prds.loc[stim])
            for stim in stims]

    # Load all results to plot.
    dict_rt_res = decutil.load_res(res_dir, list_n_most_DS, **par_kws)

    # Create figures.
    fig_scr, _, axs_scr = putil.get_gs_subplots(nrow=len(dict_rt_res),
                                                ncol=len(list_min_nunits),
                                                subw=8, subh=6, fig=fig,
                                                create_axes=True)

    # Query data.
    allScores = {}
    allnunits = {}
    for n_most_DS, rt_res in dict_rt_res.items():
        # Get accuracy scores.
        dScores = {(rec, task): res[vkey]['Scores'].mean()
                   for (rec, task), res in rt_res.items()
                   if (vkey in res) and (res[vkey] is not None)}
        allScores[n_most_DS] = pd.concat(dScores, axis=1).T
        # Get number of units.
        allnunits[n_most_DS] = {(rec, task): res[vkey]['nunits'].iloc[0]
                                for (rec, task), res in rt_res.items()
                                if (vkey in res) and (res[vkey] is not None)}
        # Get # values (for baseline plotting).
        all_nvals = pd.Series({(rec, task): res[vkey]['nclasses'].iloc[0]
                               for (rec, task), res in rt_res.items()
                               if (vkey in res) and (res[vkey] is not None)})
        un_nvals = all_nvals.unique()
        if len(un_nvals) > 1 and verbose:
            print('Found multiple # of classes to decode: {}'.format(un_nvals))
        nvals = un_nvals[0]

    allnunits = pd.DataFrame(allnunits)

    # Plot mean performance across recordings and
    # test significance by bootstrapping.
    for inmost, n_most_DS in enumerate(list_n_most_DS):
        Scores = allScores[n_most_DS]
        nunits = allnunits[n_most_DS]
        for iminu, min_nunits in enumerate(list_min_nunits):
            ax_scr = axs_scr[inmost, iminu]

            # Select only recordings with minimum number of units.
            sel_rt = nunits.index[nunits >= min_nunits]
            nScores = Scores.loc[sel_rt].copy()

            # Nothing to plot.
            if nScores.empty:
                ax_scr.axis('off')
                continue

            # Prepare data.
            if tasks is None:
                tasks = nScores.index.get_level_values(1).unique()  # in data
            if task_labels is None:
                task_labels = {task: task for task in tasks}
            dScores = {task: pd.DataFrame(nScores.xs(task, level=1).unstack(),
                                          columns=['accuracy'])
                       for task in tasks}
            lScores = pd.concat(dScores, axis=0)
            lScores['time'] = lScores.index.get_level_values(1)
            lScores['task'] = lScores.index.get_level_values(0)
            lScores['rec'] = lScores.index.get_level_values(2)
            lScores.index = np.arange(len(lScores.index))
            lScores.task.replace(task_labels, inplace=True)

            # Add altered task names for legend plotting.
            nrecs = {task_labels[task]: len(nScores.xs(task, level=1))
                     for task in tasks}
            my_format = lambda x: '{} (n={})'.format(x, nrecs[x])
            lScores['task_nrecs'] = lScores['task'].apply(my_format)

            # Plot as time series.
            sns.tsplot(lScores, time='time', value='accuracy', unit='rec',
                       condition='task_nrecs', ci=ci, n_boot=n_boot,
                       ax=ax_scr)

            # Add chance level line.
            chance_lvl = 1.0 / nvals
            putil.add_chance_level(ax=ax_scr, ylevel=chance_lvl)

            # Add stimulus periods.
            putil.plot_periods(prds, ax=ax_scr)

            # Set axis limits.
            putil.set_limits(ax_scr, tlim)

            # Format plot.
            title = ('{} most DS units'.format(n_most_DS)
                     if n_most_DS != 0 else 'all units')
            title += (', recordings with at least {} units'.format(min_nunits)
                      if (min_nunits > 1 and len(list_min_nunits) > 1) else '')
            ytitle = 1.0
            putil.set_labels(ax_scr, tlab, ylab_scr, title, ytitle)
            putil.hide_legend_title(ax_scr)

    # Match axes across decoding plots.
    for inmost in range(axs_scr.shape[0]):
        putil.sync_axes(axs_scr[inmost, :], sync_y=True)

    # Save plots.
    list_n_most_DS_str = [str(i) if i != 0 else 'all' for i in list_n_most_DS]
    par_kws['n_most_DS'] = ', '.join(list_n_most_DS_str)
    title = ''
    if add_title:
        title = decutil.fig_title(res_dir, **par_kws)
        title += ('\n{}% CI with {} bootstrapped '
                  'subsamples'.format(ci, int(n_boot)))
    fs_title = 'large'
    w_pad, h_pad = 3, 3
    par_kws['n_most_DS'] = '_'.join(list_n_most_DS_str)
    ffig = decutil.fig_fname(res_dir, 'combined_score', fformat, **par_kws)
    putil.save_fig(ffig, fig_scr, title, fs_title, w_pad=w_pad, h_pad=h_pad)

    return fig_scr, axs_scr, ffig

def plot_scores_across_nunits(recs, stims, res_dir, list_n_most_DS,
                              par_kws):
    """
    Plot prediction score results across different number of units included.
    """

    # Init.
    putil.set_style('notebook', 'ticks')
    tasks = par_kws['tasks']

    # Remove Passive if plotting Saccade or Correct.
    if par_kws['feat'] in ['saccade', 'correct']:
        tasks = tasks[~tasks.str.contains('Pas')]

    # Load all results to plot.
    dict_rt_res = decutil.load_res(res_dir, list_n_most_DS, **par_kws)

    # Create figures.
    fig_scr, _, axs_scr = putil.get_gs_subplots(nrow=len(recs),
                                                ncol=len(tasks),
                                                subw=8, subh=6,
                                                create_axes=True)

    # Do plotting per recording and task.
    for irec, rec in enumerate(recs):
        if verbose:
            print('\n' + rec)
        for itask, task in enumerate(tasks):
            if verbose:
                print(' ' + task)
            ax_scr = axs_scr[irec, itask]

            # Init data.
            dict_lScores = {}
            cols = sns.color_palette('hls', len(dict_rt_res.keys()))
            lncls = []
            for (n_most_DS, rt_res), col in zip(dict_rt_res.items(), cols):

                # Check if results exist for rec-task combination.
                if (((rec, task) not in rt_res.keys()) or
                        (not len(rt_res[(rec, task)].keys()))):
                    continue

                res = rt_res[(rec, task)]
                for v, col in zip(res.keys(), cols):
                    vres = res[v]
                    Scores = vres['Scores']
                    lncls.append(vres['nclasses'])

                    # Unstack dataframe with results.
                    lScores = pd.DataFrame(Scores.unstack(),
                                           columns=['score'])
                    lScores['time'] = lScores.index.get_level_values(0)
                    lScores['fold'] = lScores.index.get_level_values(1)
                    lScores.index = np.arange(len(lScores.index))

                    # Get number of units tested.
                    nunits = vres['nunits']
                    uni_nunits = nunits.unique()
                    if len(uni_nunits) > 1 and verbose:
                        print('Different number of units found.')
                    nunits = uni_nunits[0]

                    # Collect results.
                    dict_lScores[(nunits, v)] = lScores

            # Skip rest if no data is available.
            # Check if any result exists for rec-task combination.
            if not len(dict_lScores):
                ax_scr.axis('off')
                continue

            # Concatenate accuracy scores from every recording.
            all_lScores = pd.concat(dict_lScores)
            all_lScores['n_most_DS'] = all_lScores.index.get_level_values(0)
            all_lScores.index = np.arange(len(all_lScores.index))

            # Plot decoding results.
            nnunits = len(all_lScores['n_most_DS'].unique())
            title = '{} {}, {} sets of units'.format(' '.join(rec), task,
                                                     nnunits)
            ytitle = 1.0
            prds = [[stim] + list(constants.fixed_tr_prds.loc[stim])
                    for stim in stims]

            # Plot time series.
            palette = sns.color_palette('muted')
            sns.tsplot(all_lScores, time='time', value='score', unit='fold',
                       condition='n_most_DS', color=palette, ax=ax_scr)

            # Add chance level line.
            # This currently plots a chance level line for every nvals,
            # combined across stimulus period!
            uni_ncls = np.unique(np.array(lncls).flatten())
            if len(uni_ncls) > 1 and verbose:
                print('Different number of classes found.')
            for nvals in uni_ncls:
                chance_lvl = 1.0 / nvals
                putil.add_chance_level(ax=ax_scr, ylevel=chance_lvl)

            # Add stimulus periods.
            if prds is not None:
                putil.plot_periods(prds, ax=ax_scr)

            # Set axis limits.
            putil.set_limits(ax_scr, tlim, ylim_scr)

            # Format plot.
            putil.set_labels(ax_scr, tlab, ylab_scr, title, ytitle)

    # Match axes across decoding plots.
    # [putil.sync_axes(axs_scr[:, itask], sync_y=True)
    #  for itask in range(axs_scr.shape[1])]

    # Save plots.
    list_n_most_DS_str = [str(i) if i != 0 else 'all' for i in list_n_most_DS]
    par_kws['n_most_DS'] = ', '.join(list_n_most_DS_str)
    title = decutil.fig_title(res_dir, **par_kws)
    fs_title = 'large'
    w_pad, h_pad = 3, 3
    par_kws['n_most_DS'] = '_'.join(list_n_most_DS_str)
    ffig = decutil.fig_fname(res_dir, 'score_nunits', fformat, **par_kws)
    putil.save_fig(ffig, fig_scr, title, fs_title, w_pad=w_pad, h_pad=h_pad)

def plot_score_multi_rec(recs, stims, res_dir, par_kws):
    """Plot prediction scores for multiple recordings."""

    # Init.
    putil.set_style('notebook', 'ticks')
    n_most_DS = par_kws['n_most_DS']
    tasks = par_kws['tasks']

    # Remove Passive if plotting Saccade or Correct.
    if par_kws['feat'] in ['saccade', 'correct']:
        tasks = tasks[~tasks.str.contains('Pas')]

    # Load results.
    rt_res = decutil.load_res(res_dir, **par_kws)[n_most_DS]

    # Create figure.
    ret = putil.get_gs_subplots(nrow=1, ncol=len(tasks),
                                subw=8, subh=6, create_axes=True)
    fig_scr, _, axs_scr = ret

    print('\nPlotting multi-recording results...')
    for itask, task in enumerate(tasks):
        if verbose:
            print(' ' + task)
        ax_scr = axs_scr[0, itask]

        dict_lScores = {}
        for irec, rec in enumerate(recs):

            # Check if results exist for rec-task combination.
            if (((rec, task) not in rt_res.keys()) or
                    (not len(rt_res[(rec, task)].keys()))):
                continue

            # Init data.
            res = rt_res[(rec, task)]
            cols = sns.color_palette('hls', len(res.keys()))
            lncls = []
            for v, col in zip(res.keys(), cols):
                vres = res[v]
                if vres is None:
                    continue
                Scores = vres['Scores']
                lncls.append(vres['nclasses'])

                # Unstack dataframe with results.
                lScores = pd.DataFrame(Scores.unstack(), columns=['score'])
                lScores['time'] = lScores.index.get_level_values(0)
                lScores['fold'] = lScores.index.get_level_values(1)
                lScores.index = np.arange(len(lScores.index))

                dict_lScores[(rec, v)] = lScores

        if not len(dict_lScores):
            ax_scr.axis('off')
            continue

        # Concatenate accuracy scores from every recording.
        all_lScores = pd.concat(dict_lScores)
        all_lScores['rec'] = all_lScores.index.get_level_values(0)
        all_lScores['rec'] = all_lScores['rec'].str.join(' ')  # format label
        all_lScores.index = np.arange(len(all_lScores.index))

        # Plot decoding results.
        nrec = len(all_lScores['rec'].unique())
        title = '{}, {} recordings'.format(task, nrec)
        ytitle = 1.0
        prds = [[stim] + list(constants.fixed_tr_prds.loc[stim])
                for stim in stims]

        # Plot time series.
        palette = sns.color_palette('muted')
        sns.tsplot(all_lScores, time='time', value='score', unit='fold',
                   condition='rec', color=palette, ax=ax_scr)

        # Add chance level line.
        # This currently plots a chance level line for every nvals,
        # combined across stimulus period!
        uni_ncls = np.unique(np.array(lncls).flatten())
        if len(uni_ncls) > 1 and verbose:
            print('Different number of classes found.')
        for nvals in uni_ncls:
            chance_lvl = 1.0 / nvals
            putil.add_chance_level(ax=ax_scr, ylevel=chance_lvl)

        # Add stimulus periods.
        if prds is not None:
            putil.plot_periods(prds, ax=ax_scr)

        # Set axis limits.
        putil.set_limits(ax_scr, tlim, ylim_scr)

        # Format plot.
        putil.set_labels(ax_scr, tlab, ylab_scr, title, ytitle)

    # Save figure.
    title = decutil.fig_title(res_dir, **par_kws)
    fs_title = 'large'
    w_pad, h_pad = 3, 3
    ffig = decutil.fig_fname(res_dir, 'all_scores', fformat, **par_kws)
    putil.save_fig(ffig, fig_scr, title, fs_title, w_pad=w_pad, h_pad=h_pad)

def rec_stability_test(UA, fname=None, periods=None):
    """Check stability of recording session across tasks."""

    # Init.
    if periods is None:
        periods = ['whole trial', 'fixation']

    # Init figure.
    fig, gsp, axs = putil.get_gs_subplots(nrow=len(periods), ncol=1,
                                          subw=10, subh=2.5,
                                          create_axes=True, as_array=False)

    for prd, ax in zip(periods, axs):

        # Calculate and plot firing rate during given period in each trial
        # across session for all units.
        colors = putil.get_colors()
        task_stats = pd.DataFrame(columns=['t_start', 't_stops', 'label'])
        for task, color in zip(UA.tasks(), colors):

            # Get activity of all units in task.
            tr_rates = []
            for u in UA.iter_thru([task]):
                rates = u.get_prd_rates(prd, tr_time_idx=True)
                tr_rates.append(util.remove_dim_from_series(rates))
            tr_rates = pd.DataFrame(tr_rates)

            # Skip task if there is no non-empty, included unit.
            if not len(tr_rates.index):
                continue

            # Plot each unit's rate in task.
            tr_times = tr_rates.columns
            pplot.lines(tr_times, tr_rates.T, zorder=1, alpha=0.5,
                        color=color, ax=ax)

            # Plot mean rate with +- SD band.
            tr_time = tr_rates.columns
            mean_rate, sd_rate = tr_rates.mean(), tr_rates.std()
            lower, upper = mean_rate - sd_rate, mean_rate + sd_rate
            lower[lower < 0] = 0  # remove negative values
            ax.fill_between(tr_time, lower, upper, zorder=2, alpha=.5,
                            facecolor='grey', edgecolor='grey')
            pplot.lines(tr_time, mean_rate, lw=2, color='k', ax=ax)

            # Add task stats.
            task_lbl = '{}, {} units'.format(task, len(tr_rates.index))

            # Add grand mean FR.
            task_lbl += '\nFR: {:.1f} sp/s'.format(tr_rates.mean().mean())

            # Calculate linear trend to test gradual drift.
            slope, _, _, p_value, _ = sp.stats.linregress(tr_times,
                                                          mean_rate)
            slope = 3600 * slope  # convert to change in spikes per hour
            pval = util.format_pvalue(p_value, max_digit=3)
            task_lbl += '\n$\\delta$FR: {:.1f} sp/s/h'.format(slope)
            task_lbl += '\n{}'.format(pval)

            task_stats.loc[task] = (tr_times.min(), tr_times.max(), task_lbl)

        # Set axes limits.
        tmin, tmax = task_stats.t_start.min(), task_stats.t_stops.max()
        putil.set_limits(ax, xlim=(tmin, tmax))

        # Add task labels after all tasks have been plotted.
        putil.plot_events(task_stats[['t_start', 'label']], y_lbl=0.75,
                          lbl_ha='left', lbl_rotation=0, ax=ax)

        # Format plot.
        xlab = 'Recording time (s)' if prd == periods[-1] else None
        putil.set_labels(ax, xlab=xlab, ylab=prd)
        putil.set_spines(ax, left=False)

    # Save figure.
    title = 'Recording stability of ' + UA.Name
    putil.save_fig(fname, fig, title)
