Beispiel #1
0
def regr_unit_anticipation(u, nrate, prd, t1_offset, t2_offset, max_len):
    """Calculate anticipatory results for unit.

    Fits a linear regression of firing rate on time over the (offset)
    period, then tests the pre-stimulus rate against the baseline rate.

    Parameters
    ----------
    u : unit object (provides inc_trials, pr_times, _Rates, _Spikes,
        get_prd_rates).
    nrate : key of the rate estimate to use in u._Rates.
    prd : name of the trial period to analyse.
    t1_offset, t2_offset : offsets added to the period start / end times.
    max_len : upper limit on rate sample times kept for the fit.

    Returns
    -------
    dict with the linregress fields ('slope', 'intercept', 'rvalue',
    'pvalue', 'stderr') plus 'base_diff_pval', the Wilcoxon p-value of
    pre-stimulus rates vs baseline rates.
    """

    # Init.
    print(u.Name)
    trs = u.inc_trials()
    t1s, t2s = u.pr_times(prd, trs, concat=False)
    t1s = t1s + t1_offset
    t2s = t2s + t2_offset

    # Rates aligned to t1s; columns appear to be times in ms (divided by
    # 1000 below) — assumption, TODO confirm against get_rates.
    rates = u._Rates[nrate].get_rates(trs, t1s, t2s, t1s)
    rates = rates.loc[:, rates.columns <= max_len]

    # Fit linear regression.
    # Stack the (trial x time) rate matrix into one long sample vector;
    # level 0 of the resulting index holds the sample times.
    lrates = rates.unstack()
    lrates.dropna(inplace=True)
    x = np.array(lrates.index.get_level_values(0)) / 1000  # ms -> sec
    y = np.array(lrates)
    fit = sp.stats.linregress(x, y)
    fit_res = {
        fld: getattr(fit, fld)
        for fld in ['slope', 'intercept', 'rvalue', 'pvalue', 'stderr']
    }

    # Test difference from baseline.
    bs_rates = u.get_prd_rates('baseline', trs)
    bs_rates = util.remove_dim_from_series(bs_rates)
    # NOTE(review): the original comment claimed a "200 ms interval
    # preceding stimulus" but the code takes 100 ms before t2s —
    # confirm the intended window length.
    t1s = t2s - 100 * ms
    prestim_rates = u._Spikes.rates(trs, t1s, t2s)
    prestim_rates = util.remove_dim_from_series(prestim_rates)
    fit_res['base_diff_pval'] = stats.wilcoxon_test(bs_rates, prestim_rates)[1]

    return fit_res
Beispiel #2
0
def export_decoding_data(UA, fname, rec, task, trs, uids, prd, nrate):
    """Export decoding data into .mat file.

    Parameters
    ----------
    UA : unit array object (provides uids, get_unit, iter_thru).
    fname : output .mat file path.
    rec : recording identifier.
    task : task name to export.
    trs : trials to export.
    uids : unit ids to export; if None, all units of rec/task are used.
    prd : trial period whose rates are exported.
    nrate : rate estimate name; if None, the first unit's default is used.
    """

    # Below inits rely on these params being the same across units, which is
    # only true when exporting a single task of a single recording!

    if uids is None:
        uids = UA.uids([task])[rec]

    # Derive period timing and reference event from the first unit.
    u = UA.get_unit(uids[0], task)
    t1s, t2s = u.pr_times(prd, trs, add_latency=False, concat=False)
    prd_str = constants.tr_prds.loc[prd, 'start']
    ref_ev = constants.tr_evts.loc[prd_str, 'rel to']
    ref_ts = u.ev_times(ref_ev)
    if nrate is None:
        nrate = u.init_nrate()

    # Trial params.
    trpars = np.array([util.remove_dim_from_series(u.TrData[par][trs])
                       for par in u.TrData]).T
    trpar_names = ['_'.join(col) if util.is_iterable(col) else col
                   for col in u.TrData.columns]

    # Trial events.
    tr_evts = u.Events
    trevn_names = tr_evts.columns.tolist()
    tr_evts = np.array([util.remove_dim_from_series(tr_evts.loc[trs, evn])
                       for evn in tr_evts]).T

    # Rates.
    # NOTE(review): the comprehension's loop variable `u` does not leak in
    # Python 3, so `u` below still refers to the first unit — confirm that
    # is the intended source for the sampling times.
    rates = np.array([np.array(u._Rates[nrate].get_rates(trs, t1s, t2s))
                      for u in UA.iter_thru([task], uids)])

    # Sampling times.
    times = np.array(u._Rates[nrate].get_rates(trs, t1s, t2s, ref_ts).columns)

    # Create dictionary to export.
    export_dict = {'recording': rec, 'task': task,
                   'period': prd, 'nrate': nrate,
                   'trial_parameter_names': trpar_names,
                   'trial_parameters': trpars,
                   'trial_event_names': trevn_names,
                   'trial_events': tr_evts,
                   'times': times, 'rates': rates}

    # Export data.
    util.write_matlab_object(fname, export_dict)
Beispiel #3
0
    def get_time_rates(self, trs=None, t1s=None, t2s=None, tr_time_idx=False):
        """Return rates within time window in given trials."""

        # Empty unit: nothing to compute.
        if self.is_empty():
            return

        # Fill in default trials and window times where not provided.
        trs, t1s, t2s = self.get_trial_params(trs, t1s, t2s)

        # Per-trial firing rates over [t1s, t2s].
        rate_df = self._Spikes.rates(trs, t1s, t2s)

        # Optionally index rows by trial start time rather than trial index.
        if tr_time_idx:
            start_times = self.TrData.TrialStart[trs]
            rate_df.index = util.remove_dim_from_series(start_times)

        return rate_df
Beispiel #4
0
    def get_stim_resp_vals(self, stim, feat, t1s=None, t2s=None,
                           add_latency=True):
        """Return response to different values of stimulus feature.

        Parameters
        ----------
        stim : stimulus name (e.g. 'S1').
        feat : stimulus feature name (e.g. 'Dir').
        t1s, t2s : optional period start/end times; if both are None they
            default to the stimulus-on period.
        add_latency : whether to add response latency when deriving the
            default period times.

        Returns
        -------
        pd.DataFrame with columns 'vals' (feature value per trial) and
        'resp' (firing rate per trial); empty frame if no matching trials.
        """

        # Init time period (stimulus on).
        if t1s is None and t2s is None:
            t1s, t2s = self.pr_times(stim, None, add_latency, concat=False)

        # Get response (firing rates) in each trial.
        trs = self.trials_by_param((stim, feat))
        if not len(trs):
            return pd.DataFrame(columns=['vals', 'resp'])
        # FIX: Series.iteritems() was deprecated and removed in pandas 2.0;
        # items() is the drop-in replacement available in all versions.
        vals = pd.concat([pd.Series(v, index=tr)
                          for v, tr in trs.items()])
        resp = self._Spikes.rates(self.inc_trials(), t1s, t2s)
        stim_resp = pd.DataFrame({'vals': vals, 'resp': resp})
        stim_resp.resp = util.remove_dim_from_series(stim_resp.resp)

        return stim_resp
Beispiel #5
0
def PD_across_units(UA, UInc, utids=None, fres=None, ffig=None):
    """
    Test consistency/spread of PD across units per recording.
    What is the spread in the preferred directions across units?

    Return population level preferred direction (and direction selectivity),
    that can be used to determine dominant preferred direction to decode.

    Parameters
    ----------
    UA : unit array object.
    UInc : per-unit inclusion flags, aligned with the DS info table index.
    utids : unit-task ids to analyse; defaults to all utids of UA.
    fres : optional path to pickle results to.
    ffig : optional path to save the figure to.

    Returns
    -------
    DSInfo : per-unit direction selectivity info (with 'include' column).
    PPDres : per recording-task population PD / DSI results.
    """

    # Init.
    if utids is None:
        utids = UA.utids(as_series=True)
    tasks = utids.index.get_level_values('task').unique()
    recs = util.get_subj_date_pairs(utids)

    # Get DS info frame.
    DSInfo = ua_query.get_DSInfo_table(UA, utids)
    DSInfo['include'] = UInc

    # Calculate population PD and DSI.
    dPPDres = {}
    for rec in recs:
        for task in tasks:

            # Init.
            rt = rec + (task,)
            rtDSInfo = DSInfo.xs(rt, level=[0, 1, -1])
            if rtDSInfo.empty:
                continue

            # Calculate population PD and population DSI on included units.
            res = direction.calc_PPD(rtDSInfo.loc[rtDSInfo.include])
            dPPDres[rt] = res

    PPDres = pd.DataFrame(dPPDres).T

    # Save results.
    if fres is not None:
        results = {'DSInfo': DSInfo, 'PPDres': PPDres}
        util.write_objects(results, fres)

    # Plot results.

    # Init plotting.
    putil.set_style('notebook', 'darkgrid')
    fig, gsp, axs = putil.get_gs_subplots(nrow=len(recs), ncol=len(tasks),
                                          subw=6, subh=6, create_axes=True,
                                          ax_kws_list={'projection': 'polar'})
    xticks = direction.deg2rad(constants.all_dirs + 360/8/2*deg)

    for ir, rec in enumerate(recs):
        for it, task in enumerate(tasks):

            # Init.
            rt = rec + (task,)
            rtDSInfo = DSInfo.xs(rt, level=[0, 1, -1])
            ax = axs[ir, it]
            if rtDSInfo.empty:
                ax.set_axis_off()
                continue
            PDSI, PPD, PPDc, PADc = PPDres.loc[rt]

            # Plot PD - DSI on polar plot.
            # Integer-format the angles for the title (NaN kept as-is).
            sPPDc, sPADc = [int(v) if not np.isnan(v) else v
                            for v in (PPDc, PADc)]
            # BUG FIX: sPPDc / sPADc were computed but never used — the raw
            # float PPDc / PADc were formatted into the title instead.
            title = (' '.join(rt) + '\n' +
                     r'PPDc = {}$^\circ$ - {}$^\circ$'.format(sPPDc, sPADc) +
                     ', PDSI = {:.2f}'.format(PDSI))
            PDrad = direction.deg2rad(util.remove_dim_from_series(rtDSInfo.PD))
            pplot.scatter(PDrad, rtDSInfo.DSI, rtDSInfo.include, ylim=[0, 1],
                          title=title, ytitle=1.08, c='darkblue', ec='k',
                          linewidth=1, s=80, alpha=0.8, zorder=2, ax=ax)

            # Highlight PPD and PAD.
            offsets = np.array([-45, 0, 45]) * deg
            for D, c in [(PPDc, 'g'), (PADc, 'r')]:
                if np.isnan(D):
                    continue
                hlDs = direction.deg2rad(np.array(D+offsets))
                for hlD, alpha in [(hlDs, 0.2), ([hlDs[1]], 0.4)]:
                    pplot.bars(hlD, len(hlD)*[1], align='center',
                               alpha=alpha, color=c, zorder=1, ax=ax)

            # Format ticks.
            ax.set_xticks(xticks, minor=True)
            # Pass grid visibility positionally: the `b` keyword was
            # deprecated in Matplotlib 3.5 and later removed.
            ax.grid(True, axis='x', which='minor')
            ax.grid(False, axis='x', which='major')
            putil.hide_tick_marks(ax)

    # Save plot.
    title = 'Population direction selectivity'
    putil.save_fig(ffig, fig, title, w_pad=12, h_pad=20)

    return DSInfo, PPDres
Beispiel #6
0
def plot_qm(u,
            bs_stats,
            stab_prd_res,
            prd_inc,
            tr_inc,
            spk_inc,
            add_lbls=False,
            ftempl=None,
            fig=None,
            sps=None):
    """Plot quality metrics related figures.

    Draws waveform shape, waveform amplitude/duration, and firing-rate
    stability panels for a single unit.

    Parameters
    ----------
    u : unit object (provides Waveforms, SpikeParams, SessParams, TrData,
        QualityMetrics, name_to_fname).
    bs_stats : per-segment rate statistics with 'tmean' and 'rate' entries.
    stab_prd_res : stable-period result with 'tstart' and 'tstop' entries.
    prd_inc : boolean mask of included rate segments.
    tr_inc : boolean mask of included trials.
    spk_inc : boolean mask of included spikes.
    add_lbls : whether to add axis labels (for stand-alone figures).
    ftempl : optional file name template; if given, the figure is saved.
    fig : optional figure to draw into; created if None.
    sps : optional gridspec slot to embed into; created if None.

    Returns
    -------
    [ax_wf_inc, ax_wf_exc], ax_wf_amp, ax_wf_dur, ax_amp_dur, ax_rate :
    the axes of the individual panels.
    """

    # Init values.
    waveforms = np.array(u.Waveforms)
    wavetime = u.Waveforms.columns * us
    spk_times = np.array(u.SpikeParams['time'], dtype=float)
    base_rate = u.QualityMetrics['baseline']

    # Minimum and maximum gain.
    gmin = u.SessParams['minV']
    gmax = u.SessParams['maxV']

    # %% Init plots.

    # Disable inline plotting to prevent memory leak.
    putil.inline_off()

    # Init figure and gridspec.
    fig = putil.figure(fig)
    if sps is None:
        sps = putil.gridspec(1, 1)[0]
    ogsp = putil.embed_gsp(sps, 2, 1, height_ratios=[0.02, 1])

    info_sps, qm_sps = ogsp[0], ogsp[1]

    # Info header.
    info_ax = fig.add_subplot(info_sps)
    putil.hide_axes(info_ax)
    title = putil.get_unit_info_title(u)
    putil.set_labels(ax=info_ax, title=title, ytitle=0.80)

    # Create axes.
    gsp = putil.embed_gsp(qm_sps, 3, 2, wspace=0.3, hspace=0.4)
    ax_wf_inc, ax_wf_exc = [fig.add_subplot(gsp[0, i]) for i in (0, 1)]
    ax_wf_amp, ax_wf_dur = [fig.add_subplot(gsp[1, i]) for i in (0, 1)]
    ax_amp_dur, ax_rate = [fig.add_subplot(gsp[2, i]) for i in (0, 1)]

    # Trial markers.
    # Mark every 10th trial; label every other marker to avoid clutter.
    trial_starts, trial_stops = u.TrData.TrialStart, u.TrData.TrialStop
    tr_markers = pd.DataFrame({'time': trial_starts[9::10]})
    tr_markers['label'] = [
        str(itr + 1) if i % 2 else '' for i, itr in enumerate(tr_markers.index)
    ]

    # Common variables, limits and labels.
    WF_T_START = test_sorting.WF_T_START
    spk_t = u.SessParams.sampl_prd * (np.arange(waveforms.shape[1]) -
                                      WF_T_START)
    ses_t_lim = test_sorting.get_start_stop_times(spk_times, trial_starts,
                                                  trial_stops)
    ss, sa = 1.0, 0.8  # marker size and alpha on scatter plot

    # Color spikes by their occurance over session time.
    my_cmap = putil.get_cmap('jet')
    spk_cols = np.tile(np.array([.25, .25, .25, .25]), (len(spk_times), 1))
    if np.any(spk_inc):  # check if there is any spike included
        spk_t_inc = np.array(spk_times[spk_inc])
        # NOTE(review): normalisation uses the min/max over ALL spikes,
        # not only the included ones — confirm this is intended.
        tmin, tmax = float(spk_times.min()), float(spk_times.max())
        spk_cols[spk_inc, :] = my_cmap((spk_t_inc - tmin) / (tmax - tmin))
    # Put excluded trials to the front, and randomise order of included trials
    # so later spikes don't systematically cover earlier ones.
    spk_order = np.hstack((np.where(np.invert(spk_inc))[0],
                           np.random.permutation(np.where(spk_inc)[0])))

    # Common labels for plots
    ses_t_lab = 'Recording time (s)'

    # %% Waveform shape analysis.

    # Plot included and excluded waveforms on different axes.
    # Color included by occurance in session time to help detect drifts.
    s_waveforms, s_spk_cols = waveforms[spk_order, :], spk_cols[spk_order]
    wf_t_lim, glim = [min(spk_t), max(spk_t)], [gmin, gmax]
    wf_t_lab, volt_lab = 'WF time ($\mu$s)', 'Voltage'
    for st in ('Included', 'Excluded'):
        ax = ax_wf_inc if st == 'Included' else ax_wf_exc
        spk_idx = spk_inc if st == 'Included' else np.invert(spk_inc)
        tr_idx = tr_inc if st == 'Included' else np.invert(tr_inc)

        nspsk, ntrs = sum(spk_idx), sum(tr_idx)
        title = '{} WFs, {} spikes, {} trials'.format(st, nspsk, ntrs)

        # Select waveforms and colors.
        rand_spk_idx = spk_idx[spk_order]
        wfs = s_waveforms[rand_spk_idx, :]
        cols = s_spk_cols[rand_spk_idx]

        # Plot waveforms.
        xlab, ylab = (wf_t_lab, volt_lab) if add_lbls else (None, None)
        pwaveform.plot_wfs(wfs,
                           spk_t,
                           cols=cols,
                           lw=0.1,
                           alpha=0.05,
                           xlim=wf_t_lim,
                           ylim=glim,
                           title=title,
                           xlab=xlab,
                           ylab=ylab,
                           ax=ax)

    # %% Waveform summary metrics.

    # Init data.
    wf_amp_all = u.SpikeParams['amplitude']
    wf_amp_inc = wf_amp_all[spk_inc]
    wf_dur_all = u.SpikeParams['duration']
    wf_dur_inc = wf_dur_all[spk_inc]

    # Set common limits and labels.
    dur_lim = [0, wavetime[-2] - wavetime[WF_T_START]]  # same across units
    glim = max(wf_amp_all.max(), gmax - gmin)
    amp_lim = [0, glim]

    amp_lab = 'Amplitude'
    dur_lab = 'Duration ($\mu$s)'

    # Waveform amplitude across session time.
    m_amp, sd_amp = wf_amp_inc.mean(), wf_amp_inc.std()
    title = 'WF amplitude: {:.1f} $\pm$ {:.1f}'.format(m_amp, sd_amp)
    xlab, ylab = (ses_t_lab, amp_lab) if add_lbls else (None, None)
    pplot.scatter(spk_times,
                  wf_amp_all,
                  spk_inc,
                  c='m',
                  bc='grey',
                  s=ss,
                  xlab=xlab,
                  ylab=ylab,
                  xlim=ses_t_lim,
                  ylim=amp_lim,
                  edgecolors='',
                  alpha=sa,
                  id_line=False,
                  title=title,
                  ax=ax_wf_amp)

    # Waveform duration across session time.
    mdur, sdur = wf_dur_inc.mean(), wf_dur_inc.std()
    title = 'WF duration: {:.1f} $\pm$ {:.1f} $\mu$s'.format(mdur, sdur)
    xlab, ylab = (ses_t_lab, dur_lab) if add_lbls else (None, None)
    pplot.scatter(spk_times,
                  wf_dur_all,
                  spk_inc,
                  c='c',
                  bc='grey',
                  s=ss,
                  xlab=xlab,
                  ylab=ylab,
                  xlim=ses_t_lim,
                  ylim=dur_lim,
                  edgecolors='',
                  alpha=sa,
                  id_line=False,
                  title=title,
                  ax=ax_wf_dur)

    # Waveform duration against amplitude.
    title = 'WF duration - amplitude'
    xlab, ylab = (dur_lab, amp_lab) if add_lbls else (None, None)
    pplot.scatter(wf_dur_all[spk_order],
                  wf_amp_all[spk_order],
                  c=spk_cols[spk_order],
                  s=ss,
                  xlab=xlab,
                  ylab=ylab,
                  xlim=dur_lim,
                  ylim=amp_lim,
                  edgecolors='',
                  alpha=sa,
                  id_line=False,
                  title=title,
                  ax=ax_amp_dur)

    # %% Firing rate.

    tmean = np.array(bs_stats['tmean'])
    rmean = util.remove_dim_from_series(bs_stats['rate'])
    prd_tstart, prd_tstop = stab_prd_res['tstart'], stab_prd_res['tstop']

    # Color segments depending on whether they are included / excluded.
    def plot_periods(v, color, ax):
        """Plot rate segments, greying out excluded periods."""
        # Plot line segments.
        # A segment keeps `color` only if both of its endpoints are included.
        for i in range(len(prd_inc[:-1])):
            col = color if prd_inc[i] and prd_inc[i + 1] else 'grey'
            x, y = [(tmean[i], tmean[i + 1]), (v[i], v[i + 1])]
            ax.plot(x, y, color=col)
        # Plot line points.
        for i in range(len(prd_inc)):
            col = color if prd_inc[i] else 'grey'
            x, y = [tmean[i], v[i]]
            ax.plot(x,
                    y,
                    color=col,
                    marker='o',
                    markersize=3,
                    markeredgecolor=col)

    # Firing rate over session time.
    title = 'Baseline rate: {:.1f} spike/s'.format(float(base_rate))
    xlab, ylab = (ses_t_lab, putil.FR_lbl) if add_lbls else (None, None)
    ylim = [0, 1.25 * np.max(rmean)]
    plot_periods(rmean, 'b', ax_rate)
    pplot.lines([], [],
                c='b',
                xlim=ses_t_lim,
                ylim=ylim,
                title=title,
                xlab=xlab,
                ylab=ylab,
                ax=ax_rate)

    # Trial markers.
    putil.plot_events(tr_markers,
                      lw=0.5,
                      ls='--',
                      alpha=0.35,
                      y_lbl=0.92,
                      ax=ax_rate)

    # Excluded periods.
    # Shade session time outside the stable period at either end.
    excl_prds = []
    tstart, tstop = ses_t_lim
    if tstart != prd_tstart:
        excl_prds.append(('beg', tstart, prd_tstart))
    if tstop != prd_tstop:
        excl_prds.append(('end', prd_tstop, tstop))
    putil.plot_periods(excl_prds, ymax=0.92, ax=ax_rate)

    # %% Post-formatting.

    # Maximize number of ticks on recording time axes to prevent covering.
    for ax in (ax_wf_amp, ax_wf_dur, ax_rate):
        putil.set_max_n_ticks(ax, 6, 'x')

    # %% Save figure.
    if ftempl is not None:
        fname = ftempl.format(u.name_to_fname())
        putil.save_fig(fname, fig, title, rect_height=0.92)
        putil.inline_on()

    return [ax_wf_inc, ax_wf_exc], ax_wf_amp, ax_wf_dur, ax_amp_dur, ax_rate
Beispiel #7
0
    def __init__(self, TPLCell=None, rec_info=None, kset=None):
        """Create Unit instance from TPLCell data structure.

        Parameters
        ----------
        TPLCell : Matlab-exported cell data structure; if None, an empty
            (excluded) unit is created.
        rec_info : extra recording info appended to the session parameters.
        kset : frame of (kernel, step) rows used to estimate firing rates.
        """

        # Create empty instance.
        self.Name = ''
        self.UnitParams = pd.Series()
        self.SessParams = pd.Series()
        self.Waveforms = pd.DataFrame()
        self.SpikeParams = pd.DataFrame()
        self.Events = pd.DataFrame()
        self.TrData = pd.DataFrame()
        self._Spikes = Spikes([])
        self._Rates = pd.Series()
        self.QualityMetrics = pd.Series()
        self.DS = pd.Series()
        self.TaskRelPrds = pd.Series()

        # Default unit params.
        self.UnitParams['empty'] = True
        self.UnitParams['excluded'] = True

        # Return if no TPLCell is passed.
        if TPLCell is None:
            return

        # %% Session parameters.

        # Prepare session params.
        fname_pars = util.params_from_fname(TPLCell.File)
        subj, date, elec = fname_pars[['subj', 'date', 'elec']]
        task, task_idx, sortno = fname_pars[['task', 'idx', 'sortno']]
        [ch, ux] = TPLCell.ChanUnit
        sampl_prd = (1 / (TPLCell.Info.Frequency * Hz)).rescale(us)
        pinfo = [p.tolist() if isinstance(p, np.ndarray)
                 else p for p in TPLCell.PInfo]

        # Assign session params.
        sp_list = [('task', task),
                   ('task #', task_idx),
                   ('subj', subj),
                   ('date', date),
                   ('elec', elec),
                   ('ch', ch),
                   ('ux', ux),
                   ('sort #', sortno),
                   ('filepath', TPLCell.Filename),
                   ('filename', TPLCell.File),
                   ('paraminfo', pinfo),
                   ('sampl_prd', sampl_prd)]
        self.SessParams = util.series_from_tuple_list(sp_list)
        # NOTE(review): Series.append was removed in pandas 2.0 — needs
        # migration to pd.concat on modern pandas.
        self.SessParams = self.SessParams.append(rec_info)

        # Name unit.
        self.set_name()

        # Unit params.
        self.UnitParams['empty'] = False
        self.UnitParams['excluded'] = False

        # %% Waveforms.

        if 'Waves' in TPLCell._fieldnames:
            wfs = TPLCell.Waves
            if wfs.ndim == 1:  # there is only a single spike
                wfs = np.reshape(wfs, (1, len(wfs)))  # extend it to matrix
            wf_sampl_t = float(sampl_prd) * np.arange(wfs.shape[1])
            self.Waveforms = pd.DataFrame(wfs, columns=wf_sampl_t)

        # %% Spike params.

        if 'Spikes' in TPLCell._fieldnames:
            spk_pars = [('time', util.fill_dim(np.array(TPLCell.Spikes))),
                        ('included', True)]
            # NOTE(review): DataFrame.from_items was removed in pandas 1.0 —
            # needs migration to from_dict / dict(...) on modern pandas.
            self.SpikeParams = pd.DataFrame.from_items(spk_pars)

        # %% Stimulus parameters.

        stim_params = constants.stim_params

        # Extract all trial parameters.
        trpars = pd.DataFrame(TPLCell.TrialParams, columns=TPLCell.Header)

        # Extract stimulus parameters.
        StimParams = trpars[stim_params.name]
        StimParams.columns = stim_params.index

        # Change type if required.
        stim_pars = StimParams.copy()
        for stim_par in stim_pars:
            stim_type = stim_params.type[stim_par]
            if stim_type is not None:
                stim_pars[stim_par] = stim_pars[stim_par].astype(stim_type)
        StimParams = stim_pars

        # Combine x and y stimulus coordinates into a single location variable.
        stim_pars = StimParams.copy()
        for stim in stim_pars.columns.levels[0]:
            pstim = stim_pars[stim]
            if ('LocX' in pstim.columns) and ('LocY' in pstim.columns):
                lx, ly = pstim.LocX, pstim.LocY
                stim_pars[stim, 'Loc'] = [(x, y) for x, y in zip(lx, ly)]
        StimParams = stim_pars.sort_index(axis=1)

        # Add same-different columns (S/D trials).
        feats = np.unique([f[1] for f in StimParams.columns
                           if util.is_iterable(f) and len(f) == 2])
        for feat in feats:
                s1f, s2f, dsf = ('S1', feat), ('S2', feat), ('S_D', feat)
                if (s1f in StimParams) and (s2f in StimParams):
                    StimParams[dsf] = 'diff'
                    isame = (StimParams[s1f] == StimParams[s2f])
                    StimParams.loc[isame, dsf] = 'same'

        # %% Subject answer parameters.

        Answer = pd.DataFrame()

        # Recode correct/incorrect answer column.
        corr_ans = trpars['subjectAnswer']
        if len(corr_ans.unique()) > 2:
            corr_ans_vals = ', '.join([str(v) for v in corr_ans.unique()])
            warnings.warn(('More than 2 unique values for correct answer: ' +
                           corr_ans_vals))
        corr_ans = corr_ans == corr_ans.max()  # higher value is correct!
        Answer['correct'] = corr_ans

        # Add column for subject response (saccade direction).
        same_dir = StimParams['S1', 'Dir'] == StimParams['S2', 'Dir']
        # This is not actually correct for passive task!
        Answer['saccade'] = ((same_dir & corr_ans) | (~same_dir & ~corr_ans))

        # %% Trial events.

        # Timestamps of events. Only S1 offset and S2 onset are reliable!
        # S1 onset and S2 offset are fixed to these two.
        # Altogether these four are called anchor events.

        # Watch out: indexing starting with 1 in TPLCell (Matlab)!
        # Everything is in seconds below!

        if 'rel_times' in TPLCell._fieldnames:
            # Use relative times aligned to trial start (single-unit data).
            rel_times = TPLCell.rel_times
            anchor_evts = [('S1 on', rel_times.S1_on),
                           ('S1 off', rel_times.S1_off),
                           ('S2 on', rel_times.S2_on),
                           ('S2 off', rel_times.S2_off)]

        else:
            # Use absolute times (multi-unit data).
            S1dur = float(constants.stim_dur['S1'].rescale(s))
            S2dur = float(constants.stim_dur['S2'].rescale(s))
            iS1off = TPLCell.Patterns.matchedPatterns[:, 2]-1
            iS2on = TPLCell.Patterns.matchedPatterns[:, 3]-1
            ts = TPLCell.Timestamps
            anchor_evts = [('S1 on', ts[iS1off]-S1dur),
                           ('S1 off', ts[iS1off]),
                           ('S2 on', ts[iS2on]),
                           ('S2 off', ts[iS2on]+S2dur)]

        # NOTE(review): DataFrame.from_items removed in pandas 1.0 (also
        # below) — needs migration on modern pandas.
        anchor_evts = pd.DataFrame.from_items(anchor_evts)

        # Align trial events to S1 onset.
        S1_onset = anchor_evts['S1 on']  # this is also used below!
        anchor_evts = anchor_evts.subtract(S1_onset, axis=0)

        # Add additional trial events, relative to anchor events.
        evts = [(evt, anchor_evts[rel]+float(offset.rescale(s)))
                for evt, (rel, offset) in constants.tr_evts.iterrows()]
        evts = pd.DataFrame.from_items(evts)

        # Update saccade (end of recording) if info available.
        if ('rel_times' in TPLCell._fieldnames and
            'saccade' in TPLCell.rel_times._fieldnames):
            evts['saccade'] = TPLCell.rel_times.saccade - S1_onset

        # Add dimension to timestamps (ms).
        for evt in evts:
            evts[evt] = util.add_dim_to_series(1000*evts[evt], ms)  # s --> ms
        self.Events = evts

        # %% Trial parameters

        TrialParams = pd.DataFrame()

        # Add start time, end time and length of each trials.
        if 'Timestamps' in TPLCell._fieldnames:
            tstamps = TPLCell.Timestamps
            # -1: convert Matlab 1-based indices to Python 0-based.
            tr_times = np.array([(tstamps[i1-1], tstamps[i2-1]) for i1, i2
                                 in TPLCell.Info.successfull_trials_indices])
            tr_times = tr_times * s
            for name, col in [('TrialStart', tr_times[:, 0]),
                              ('TrialStop', tr_times[:, 1]),
                              ('TrialLength', tr_times[:, 1]-tr_times[:, 0])]:
                util.add_quant_col(TrialParams, col, name)

        # Add trial period lengths to trial params.
        TrialParams['S1Len'] = evts['S1 off'] - evts['S1 on']
        TrialParams['S2Len'] = evts['S2 off'] - evts['S2 on']
        TrialParams['DelayLenPrec'] = evts['S2 on'] - evts['S1 off']

        # "Categorical" (rounded) delay length variable.
        # Snap each precise delay to the nearest standard delay length.
        delay_lens = util.dim_series_to_array(TrialParams['DelayLenPrec'])
        len_diff = [(i, np.abs(delay_lens - dl))
                    for i, dl in enumerate(constants.del_lens)]
        min_diff = pd.DataFrame.from_items(len_diff).idxmin(1)
        dlens = constants.del_lens[min_diff]
        TrialParams['DelayLen'] = list(util.remove_dim_from_series(dlens))

        # Add target feature to be reported.
        if task == 'com':  # Combined task: target feature varies.
            to_report = trpars.TrialType.replace([0, 1], ['loc', 'dir'])
        else:
            to_report = constants.to_report(task)
        TrialParams['ToReport'] = to_report

        # Init included trials (all trials included initially).
        TrialParams['included'] = np.array(True, dtype=bool)

        # %% Assamble full trial data frame.

        StimParams.columns = StimParams.columns.tolist()
        self.TrData = pd.concat([TrialParams, StimParams, Answer], axis=1)

        # %% Spikes.

        # Trials spikes, aligned to S1 onset.
        spk_trains = [(spk_train - S1_onset[i]) * s  # align to S1 on
                      for i, spk_train in enumerate(TPLCell.TrialSpikes)]
        t_starts = self.ev_times('fixate')  # start of trial
        t_stops = self.ev_times('saccade')  # end of trial
        self._Spikes = Spikes(spk_trains, t_starts, t_stops)

        # %% Rates.

        # Estimate firing rate in each trial.
        for name, (kernel, step) in kset.iterrows():
            self.add_rate(name, kernel, step)
Beispiel #8
0
def test_task_relatedness(u, p_th=0.05):
    """Test if unit has any task related activity.

    For each stimulus / period / trial-parameter value, compares the rates
    in a window around the peak-rate time against the baseline rate.

    Parameters
    ----------
    u : unit object to test; detailed results are stored on u.PrdParTests.
    p_th : p-value threshold for significance.

    Returns
    -------
    (has_min_rate, is_task_related) : booleans — whether any tested
    condition reaches the minimum firing rate, and whether any comparison
    against baseline is significant.
    """

    # Init.
    nrate = u.init_nrate()
    wndw_len, minFR = QC_THs.loc[u.get_region()]
    if not len(u.inc_trials()):
        # BUG FIX: return a pair, consistent with the normal return below
        # (a bare False would break callers that unpack two values).
        return False, False

    # Get baseline rate per trial.
    baseline = util.remove_dim_from_series(u.get_prd_rates('baseline'))

    # Init periods and trials sets to test.
    feats = ('Dir', )  # ('Dir', 'Loc')
    prds_trs = [('S1', [('S1', 'early delay', 'late delay'), feats]),
                ('S2', [('S2', 'post-S2'), feats])]
    # DataFrame.from_items was removed in pandas 1.0; from_dict over an
    # (insertion-ordered) dict is the equivalent.
    prds_trs = pd.DataFrame.from_dict(dict(prds_trs), orient='index',
                                      columns=['prds', 'trpars'])

    # Go through each stimulus, period and trial parameter to be tested.
    pval = []
    mean_rate = []
    for stim, (prds, trpars) in prds_trs.iterrows():

        for prd in prds:
            t1s, t2s = u.pr_times(prd, add_latency=False, concat=False)

            for par in trpars:
                ptrs = u.trials_by_param((stim, par))

                # items() replaces the removed Series.iteritems().
                for vpar, trs in ptrs.items():

                    # Get rates during period on trials with given param value.
                    rates = u._Rates[nrate].get_rates(trs, t1s, t2s)
                    bs_rates = baseline[trs]

                    # No rates available.
                    if rates.empty:
                        continue

                    # Get sub-period around time with maximal rate.
                    # BUG FIX: the peak time was previously stored in `tmax`
                    # and immediately overwritten by the column maximum
                    # before being used; keep it in its own variable.
                    # idxmax() yields the time label of the peak (argmax is
                    # positional in modern pandas).
                    tpeak = rates.mean().idxmax()
                    tmin, tmax = rates.columns.min(), rates.columns.max()
                    tstart, tend = stats.prd_in_window(tpeak, tmin, tmax,
                                                       wndw_len, ms)
                    tidx = (rates.columns >= tstart) & (rates.columns <= tend)

                    # Test difference from baseline rate.
                    wnd_rates = rates.loc[:, tidx].mean(1)
                    stat, p = stats.mann_whithney_u_test(wnd_rates, bs_rates)
                    pval.append(((stim, prd, par, str(vpar)), p))

                    # Mean rate.
                    mrate = rates.mean().mean()
                    mean_rate.append(((stim, prd, par, str(vpar)), mrate))

    # Format results.
    names = ['stim', 'prd', 'par', 'vpar']
    pval, mean_rate = [
        util.series_from_tuple_list(res, names) for res in (pval, mean_rate)
    ]

    # Save results to unit.
    u.PrdParTests = pd.concat([mean_rate, pval],
                              axis=1,
                              keys=['mean_rate', 'pval'])
    u.PrdParTests['sign'] = u.PrdParTests['pval'] < p_th

    # Save test parameters.
    # NOTE(review): these are plain attributes on the DataFrame object and
    # are not preserved by most pandas operations.
    u.PrdParTests.test = 'mann_whithney_u_test'
    u.PrdParTests.p_th = p_th

    # Is there any task- (stimulus-parameter-) related period?
    has_min_rate = (u.PrdParTests.mean_rate >= minFR).any()
    is_task_related = u.PrdParTests.sign.any()

    return has_min_rate, is_task_related
Beispiel #9
0
def rec_stability_test(UA, fname=None, periods=None):
    """Check stability of recording session across tasks.

    Plots per-unit firing rates over session time for each period and task,
    with a mean +- SEM overlay and a linear-trend (drift) test per task.

    Parameters
    ----------
    UA : unit array object (provides tasks, iter_thru, Name).
    fname : optional file name to save the figure to.
    periods : list of period names to plot; defaults to
        ['whole trial', 'fixation'].
    """

    # Init.
    if periods is None:
        periods = ['whole trial', 'fixation']

    # Init figure.
    fig, gsp, axs = putil.get_gs_subplots(nrow=len(periods), ncol=1,
                                          subw=10, subh=2.5, create_axes=True,
                                          as_array=False)

    for prd, ax in zip(periods, axs):

        # Calculate and plot firing rate during given period in each trial
        # across session for all units.
        colors = putil.get_colors()
        task_stats = pd.DataFrame(columns=['t_start', 't_stops', 'label'])
        for task, color in zip(UA.tasks(), colors):

            # Get activity of all units in task.
            tr_rates = []
            for u in UA.iter_thru([task]):
                rates = u.get_prd_rates(prd, tr_time_idx=True)
                tr_rates.append(util.remove_dim_from_series(rates))
            tr_rates = pd.DataFrame(tr_rates)

            # Not (non-empty and included) unit during task.
            if not len(tr_rates.index):
                continue

            # Plot each rate in task.
            tr_times = tr_rates.columns
            pplot.lines(tr_times, tr_rates.T, zorder=1, alpha=0.5,
                        color=color, ax=ax)

            # Plot mean +- sem rate.
            tr_time = tr_rates.columns
            # BUG FIX: the band was labelled SEM but computed as the plain
            # SD; DataFrame.sem() gives the standard error of the mean.
            mean_rate, sem_rate = tr_rates.mean(), tr_rates.sem()
            lower, upper = mean_rate-sem_rate, mean_rate+sem_rate
            lower[lower < 0] = 0  # remove negative values
            ax.fill_between(tr_time, lower, upper, zorder=2, alpha=.5,
                            facecolor='grey', edgecolor='grey')
            pplot.lines(tr_time, mean_rate, lw=2, color='k', ax=ax)

            # Add task stats.
            task_lbl = '{}, {} units'.format(task, len(tr_rates.index))

            # Add grand mean FR.
            task_lbl += '\nFR: {:.1f} sp/s'.format(tr_rates.mean().mean())

            # Calculate linear trend to test gradual drift.
            slope, _, _, p_value, _ = sp.stats.linregress(tr_times, mean_rate)
            slope = 3600*slope  # convert to change in spike per hour
            pval = util.format_pvalue(p_value, max_digit=3)
            task_lbl += '\n$\delta$FR: {:.1f} sp/s/h'.format(slope)
            task_lbl += '\n{}'.format(pval)

            task_stats.loc[task] = (tr_times.min(), tr_times.max(), task_lbl)

        # Set axes limits.
        tmin, tmax = task_stats.t_start.min(), task_stats.t_stops.max()
        putil.set_limits(ax, xlim=(tmin, tmax))

        # Add task labels after all tasks have been plotted.
        putil.plot_events(task_stats[['t_start', 'label']], y_lbl=0.75,
                          lbl_ha='left', lbl_rotation=0, ax=ax)

        # Format plot.
        xlab = 'Recording time (s)' if prd == periods[-1] else None
        putil.set_labels(ax, xlab=xlab, ylab=prd)
        putil.set_spines(ax, left=False)

    # Save figure.
    title = 'Recording stability of ' + UA.Name
    putil.save_fig(fname, fig, title)