Example #1
class Good(base.AnalysisBase):
    requires = ['']
    sets = ['good_%s'%cs for cs in config.stimuli()]
    across = 'day'
    updated = '181003'

    def run(self, date):
        """
        Run all analyses and return results in a dictionary.

        Parameters
        ----------
        date : Date object

        Returns
        -------
        dict
            All of the output values

        """

        out = self.nanoutput()

        for cs in config.stimuli():
            stimresp = self.analysis('stim_dff_alltrials_%s' % cs)
            dff_active = np.sum(stimresp > 0.025)/float(len(stimresp))
            out['good_%s' % cs] = dff_active > 0.05

        return out
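
The check in Good.run boils down to two thresholds: a cell counts as responsive if its mean dF/F exceeds 0.025, and the stimulus is flagged "good" if more than 5% of cells pass. A minimal standalone sketch of that arithmetic, using a made-up response vector in place of the stim_dff_alltrials analysis:

import numpy as np

# Hypothetical per-cell mean dF/F responses for one stimulus
stimresp = np.array([0.01, 0.03, 0.00, 0.05, 0.02, -0.01, 0.04, 0.01])

# Fraction of cells above 0.025 dF/F, mirroring Good.run above
dff_active = np.sum(stimresp > 0.025) / float(len(stimresp))
good = dff_active > 0.05

print(dff_active, good)  # 0.375 True for this synthetic vector
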
Example #8
class Repcount(base.AnalysisBase):
    requires = ['classifier']
    sets = ['repcount_%s'%cs for cs in config.stimuli()] + \
           ['repcount_hungry_%s'%cs for cs in config.stimuli()] + \
           ['repcount_iti_%s'%cs for cs in config.stimuli()] + \
           ['repcount_comb_%s'%cs for cs in config.stimuli()] + \
           ['repcount_pair_%s'%cs for cs in config.stimuli()] + \
           ['repcount_pair_hungry_%s'%cs for cs in config.stimuli()] + \
           ['repcount_pair_iti_%s'%cs for cs in config.stimuli()] + \
           ['repcount_pair_comb_%s'%cs for cs in config.stimuli()]
    across = 'day'
    updated = '181006'

    def run(self, date):
        """
        Run all analyses and return results in a dictionary.

        Parameters
        ----------
        date : Date object

        Returns
        -------
        dict
            All of the output values

        """

        out = self.nanoutput()

        for cs in config.stimuli():
            if self.analysis('good_%s' % cs):
                out['repcount_%s'%cs], out['repcount_pair_%s'%cs] = \
                    self.event_counts(date.runs(['spontaneous', 'sated']), cs, 0.1)

                out['repcount_hungry_%s'%cs], out['repcount_pair_hungry_%s'%cs] = \
                    self.event_counts(date.runs(['spontaneous', 'hungry']), cs, 0.1)

                out['repcount_iti_%s'%cs], out['repcount_pair_iti_%s'%cs] = \
                    self.event_counts(date.runs(['training']), cs, 0.1)

                out['repcount_comb_%s'%cs], out['repcount_pair_comb_%s'%cs] = \
                    self.event_counts(date.runs(), cs, 0.1)

        return out

    def event_counts(self,
                     runs,
                     cs,
                     deconvolved_threshold=0.2,
                     trange=(-2, 3)):
        """
        Find all replay events
        :param cs:
        :return:
        """

        count_pair = None
        count = None
        for run in runs:
            evs = self.analysis('repevent_%s' % cs, run)
            t2p = run.trace2p()

            if count_pair is None:
                count_pair = np.zeros((t2p.ncells, t2p.ncells))
                count = np.zeros(t2p.ncells)

            if evs is not None:
                trs = t2p.trace('deconvolved')

                for ev in evs:
                    if -1 * trange[0] < ev < np.shape(trs)[1] - trange[1]:
                        act = np.nanmax(trs[:, ev + trange[0]:ev + trange[1]],
                                        axis=1)
                        act = act > deconvolved_threshold

                        actout = np.zeros(
                            (len(act), len(act)),
                            dtype=bool)  # pairs of cells that overlapped
                        for i in range(len(act)):  # iterate over cells
                            actout[i, :] = np.bitwise_and(act[i], act)

                        count_pair += actout.astype(np.float64)
                        count += act.astype(np.float64)

        return count, count_pair
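
The inner loop of event_counts marks which cells peaked above the deconvolved threshold in a window around each replay event, then accumulates per-cell and per-pair counts. A small self-contained sketch on synthetic traces; note that the outer product of the boolean activity vector gives the same pairwise matrix as the bitwise_and loop above:

import numpy as np

rng = np.random.RandomState(0)
ncells, nframes = 5, 200
trs = rng.rand(ncells, nframes) * 0.3   # synthetic deconvolved traces
events = [20, 75, 150]                  # synthetic event-onset frames
trange = (-2, 3)
threshold = 0.2

count = np.zeros(ncells)
count_pair = np.zeros((ncells, ncells))
for ev in events:
    if -1 * trange[0] < ev < nframes - trange[1]:
        # Peak of each cell in the window around the event, thresholded
        act = np.nanmax(trs[:, ev + trange[0]:ev + trange[1]], axis=1) > threshold
        count += act
        # np.outer of a boolean vector with itself marks every coactive pair
        count_pair += np.outer(act, act)

print(count, count_pair.sum())
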
Example #9
class Stim(base.AnalysisBase):
    requires = ['']
    sets = ['stim_dff_%s' % cs for cs in config.stimuli()] + \
           ['stim_dff_2_4_%s' % cs for cs in config.stimuli()] + \
           ['stim_dff_all_%s' % cs for cs in config.stimuli()] + \
           ['stim_dff_all_pertrial']
    across = 'day'
    updated = '190219'

    def run(self, date):
        """
        Run all analyses and return results in a dictionary.

        Parameters
        ----------
        date : Date object

        Returns
        -------
        dict
            All of the output values

        """

        out = self.nanoutput()

        for cs in config.stimuli():
            out['stim_dff_%s' % cs] = self.get_stimuli(date.runs('training'),
                                                       cs, (0, 2),
                                                       'dff',
                                                       all_trials=False)
            out['stim_dff_2_4_%s' % cs] = self.get_stimuli(
                date.runs('training'), cs, (2, 4), 'dff', all_trials=False)
            out['stim_dff_all_%s' % cs] = self.get_stimuli(
                date.runs('training'), cs, (0, 2), 'dff', all_trials=True)
        out['stim_dff_all_pertrial'] = self.get_stimuli_per_trial(
            date.runs('training'), (0, 2), 'dff')

        return out

    def get_stimuli(self, runs, cs, trange, ttype, all_trials=False):
        """
        Get stimulus responses from training runs.

        Parameters
        ----------
        runs : RunSorter
            Contains all Run objects
        cs : str
            Stimulus name
        trange : tuple of ints
            Time range to average over
        ttype : str
            Trace type, 'dff' or 'deconvolved'
        all_trials : bool
            If true, include time after licking and miss trials

        Returns
        -------
        numpy array
            Vector of mean responses of length ncells.

        """

        err = -1 if all_trials else 0
        lick = -1 if all_trials else 100
        baseline = (-1, 0) if 'dec' not in ttype else (-1, -1)

        # Go through the added stimuli and add all onsets
        trs = []
        for run in runs:
            with warnings.catch_warnings():
                t2p = run.trace2p()
                # cstrs = ncells, frames, nstimuli/onsets
                cstrs = t2p.cstraces(cs, trange[0], trange[1], ttype, lick,
                                     err, baseline)

                warnings.simplefilter('ignore', category=RuntimeWarning)

                # Average across frames within the window: ncells x onsets
                trs.append(np.nanmean(cstrs, axis=1))

                # Include pavlovian trials with plus trials
                if trange[1] <= 2 and cs == 'plus':
                    pav = t2p.cstraces('pavlovian', trange[0], trange[1],
                                       ttype, lick, err, baseline)
                    trs.append(np.nanmean(pav, axis=1))

        trs_cat = np.concatenate(trs, axis=1)

        return np.nanmean(trs_cat, axis=1)

    def get_stimuli_per_trial(self, runs, trange, ttype):
        """
        Return a long-format DataFrame of per-trial mean responses, one row
        per (mouse, date, run, roi_idx, trial_idx).
        """

        lick = -1
        err = -1
        baseline = (-1, 0) if 'dec' not in ttype else (-1, -1)

        result = [pd.DataFrame()]
        for run in runs:
            t2p = run.trace2p()
            # cstrs = ncells, frames, nstimuli/onsets
            cstrs = t2p.cstraces('', trange[0], trange[1], ttype, lick, err,
                                 baseline)
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=RuntimeWarning)
                trs = np.nanmean(cstrs, axis=1)
            for roi_idx, roi_response in enumerate(trs):
                index = pd.MultiIndex.from_product(
                    [[run.mouse], [run.date], [run.run], [roi_idx],
                     range(len(roi_response))],
                    names=['mouse', 'date', 'run', 'roi_idx', 'trial_idx'])
                result.append(
                    pd.DataFrame({'response': roi_response}, index=index))

        df = pd.concat(result, axis=0)

        return df
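
get_stimuli_per_trial returns a tidy, long-format DataFrame rather than a matrix, which makes later grouping and aggregation straightforward. A sketch of the same MultiIndex construction on a tiny synthetic response matrix; the mouse, date, and run identifiers are placeholders, not values from the dataset:

import numpy as np
import pandas as pd

# 3 ROIs x 4 trials of made-up per-trial mean responses
trs = np.arange(12, dtype=float).reshape(3, 4) / 100.
mouse, date, run = 'mouse1', 180101, 1   # placeholder identifiers

frames = []
for roi_idx, roi_response in enumerate(trs):
    index = pd.MultiIndex.from_product(
        [[mouse], [date], [run], [roi_idx], range(len(roi_response))],
        names=['mouse', 'date', 'run', 'roi_idx', 'trial_idx'])
    frames.append(pd.DataFrame({'response': roi_response}, index=index))

df = pd.concat(frames, axis=0)
# One row per (roi, trial); e.g. mean response per ROI:
print(df.groupby('roi_idx')['response'].mean())
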
Example #10
class Noisecorr(base.AnalysisBase):
    requires = ['']
    sets = ['noisecorr_%s'%cs for cs in config.stimuli()] + \
           ['noisecorr_hofer']
    across = 'day'
    updated = '181031'

    def run(self, date):
        """
        Run all analyses and return results in a dictionary.

        Parameters
        ----------
        date : Date object

        Returns
        -------
        dict
            All of the output values

        """

        out = self.nanoutput()

        # Pool mean-subtracted responses across stimuli for the Hofer-style
        # calculation, which expects the mean stimulus response removed
        all_trs = None
        for cs in config.stimuli():
            trs = self.get_stimuli(date.runs('training'), cs, (0, 2), 'dff')
            demeaned = (trs.T - np.nanmean(trs, axis=1)).T
            all_trs = demeaned if all_trs is None else np.concatenate(
                [all_trs, demeaned], axis=1)
            out['noisecorr_%s' % cs] = self.calc_noisecorr_cohen(trs)

        out['noisecorr_hofer'] = self.calc_noisecorr_hofer(all_trs)

        return out

    @staticmethod
    def get_stimuli(runs, cs, trange=(0, 2), ttype='dff', nolick=False):
        """
        Get stimulus responses from training runs

        Parameters
        ----------
        runs : RunSorter
            Contains all Run objects
        cs : str
            Stimulus name
        trange : tuple of ints
            Time range to average over
        ttype : str
            Trace type, 'dff' or 'deconvolved'
        nolick : bool
            If true, remove time after licking

        Returns
        -------
        numpy array
            Vector of mean responses of length ncells
        """

        err = -1
        lick = 100 if nolick else -1  # apply the pre-lick cutoff only when nolick is set
        baseline = (-1, 0) if ttype == 'dff' else (-1, -1)

        # Go through the added stimuli and add all onsets
        trs = []
        for run in runs:
            with warnings.catch_warnings():
                t2p = run.trace2p()
                cstrs = t2p.cstraces(cs, trange[0], trange[1], ttype, lick,
                                     err, baseline)

                warnings.simplefilter('ignore', category=RuntimeWarning)

                if len(trs) == 0:
                    trs = np.nanmean(cstrs,
                                     axis=1)  # ncells, frames, nstimuli/onsets
                else:
                    trs = np.concatenate([trs, np.nanmean(cstrs, axis=1)],
                                         axis=1)

                if trange[1] <= 2 and cs == 'plus':
                    pav = t2p.cstraces('pavlovian', trange[0], trange[1],
                                       ttype, lick, err, baseline)
                    trs = np.concatenate([trs, np.nanmean(pav, axis=1)],
                                         axis=1)

        return trs

    @staticmethod
    def calc_noisecorr_cohen(trs, includes_nans=False, nrand=500):
        """
        Calculate the noise correlations of cells via their traces.
        Uses the method of Marlene Cohen and John Maunsell.

        Parameters
        ----------
        trs : matrix of ncells x ntrials
            Traces of ncells x ntrials
        includes_nans : bool
            If true, use Pandas to calculate the correlation to account for NaNs
        nrand : int
            Number of randomizations to subtract

        Returns
        -------
        matrix of ncells x ncells
            Noise correlations
        """

        if np.shape(trs)[1] < 10:
            return np.nan
        else:
            if not includes_nans:
                out = np.corrcoef(trs)
            else:
                dftrs = pd.DataFrame(trs.T)
                out = dftrs.corr().to_numpy()

            # Shuffle a copy so the caller's traces are not modified in place
            trs = np.array(trs)
            ncells = np.shape(trs)[0]
            stimorder = np.arange(np.shape(trs)[1])
            for i in range(nrand):
                for c in range(ncells):
                    np.random.shuffle(stimorder)
                    trs[c, :] = trs[c, stimorder]

                if not includes_nans:
                    out -= np.corrcoef(trs) / float(nrand)
                else:
                    dftrs = pd.DataFrame(trs.T)
                    out -= dftrs.corr().to_numpy() / float(nrand)

        return out

    @staticmethod
    def calc_noisecorr_hofer(trs, includes_nans=False):
        """
        Calculate the noise correlations of cells via their traces.
        Requires that the mean stimulus response has been subtracted.
        Uses the method of Sonja Hofer.

        Parameters
        ----------
        trs : matrix of ncells x ntrials
            Traces of ncells x ntrials
        includes_nans : bool
            If true, use Pandas to calculate the correlation to account for NaNs

        Returns
        -------
        matrix of ncells x ncells
            Noise correlations
        """

        if not includes_nans:
            return np.corrcoef(trs)
        else:
            dftrs = pd.DataFrame(trs.T)
            return dftrs.corr().to_numpy()
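
The includes_nans branch exists because np.corrcoef has no NaN handling, while DataFrame.corr computes each pairwise correlation over the trials both cells have in common (DataFrame.as_matrix was removed from pandas, so to_numpy is used above). A two-cell sanity check on made-up values:

import numpy as np
import pandas as pd

# Two cells, five trials, with one NaN trial for the first cell
trs = np.array([[0.1, 0.4, np.nan, 0.3, 0.2],
                [0.2, 0.5, 0.1,    0.4, 0.3]])

# np.corrcoef returns NaN for every pair involving the cell with the missing trial
print(np.corrcoef(trs))

# pandas drops the NaN trial pairwise before correlating
print(pd.DataFrame(trs.T).corr().to_numpy())
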
Example #11
class Vdrive(base.AnalysisBase):
    requires = ['']
    sets = ['vdrive_%s'%cs for cs in config.stimuli()] + \
           ['vdrive_fraction_%s'%cs for cs in config.stimuli()]
    across = 'day'
    updated = '181028'

    def run(self, date):
        """
        Run all analyses and return results in a dictionary.

        Parameters
        ----------
        date : Date object

        Returns
        -------
        dict
            All of the output values

        """

        out = self.nanoutput()

        for cs in config.stimuli():
            out['vdrive_%s' % cs] = self.responses(date.runs('training'), cs)
            out['vdrive_fraction_%s' % cs] = \
                np.nansum(out['vdrive_%s' % cs] > 50)/float(len(out['vdrive_%s' % cs]))

        return out

    def median_first_lick(self, runs, cs):
        """
        Get the median first lick

        Parameters
        ----------
        runs : RunSorter
        cs : str
            Stimulus name

        Returns
        -------
        float
            Median first lick time in frames
        """

        firstlicks = []
        for run in runs:
            t2p = run.trace2p()
            fl = t2p.firstlick(cs, units='frames', maxframes=t2p.framerate*2)
            fl[np.isnan(fl)] = int(round(t2p.framerate*2))
            firstlicks = np.concatenate([firstlicks, fl], axis=0)

        if len(firstlicks) < 2: return int(round(t2p.framerate*2))
        return np.nanmedian(firstlicks)

    @staticmethod
    def gettrials(runs, cs, start=0, end=0, error_trials=-1, lick=-1):
        """
        Get all training trials.

        Parameters
        ----------
        runs : RunSorter
        cs : str
            Stimulus name
        start : float
            Beginning of time to integrate
        end : float
            End of time to integrate
        error_trials : int
            -1 all trials, 0 correct trials, 1 error trials
        lick : float
            Number of milliseconds to cut off before the first lick

        Returns
        -------
        numpy matrix
            All trials of size ncells, ntrials

        """

        alltrs = []
        for run in runs:
            t2p = run.trace2p()

            # ncells, frames, nstimuli/onsets
            trs = t2p.cstraces(cs, start_s=start, end_s=end, trace_type='dff',
                               cutoff_before_lick_ms=lick, errortrials=error_trials)
            if cs == 'plus':
                pavs = t2p.cstraces('pavlovian', start_s=start, end_s=end, trace_type='dff',
                                    cutoff_before_lick_ms=lick, errortrials=error_trials)
                trs = np.concatenate([trs, pavs], axis=2)

            if len(alltrs) == 0:
                alltrs = trs
            else:
                alltrs = np.concatenate([alltrs, trs], axis=2)

        alltrs = np.nanmean(alltrs, axis=1)  # across frames
        return alltrs

    def responses(self, runs, cs, tintegrate=0.3, pval=0.05, ncses=3, nolick=True):
        """
        Score each cell's visual drive as the maximum -log p-value of a
        rank-sum test of stimulus responses against the pre-stimulus
        baseline, computed over time bins of width tintegrate seconds and
        truncated at the median first lick. The pval threshold is
        Bonferroni-corrected for time bins, cells, and CSes.
        """

        mfl = self.median_first_lick(runs, cs)
        fr = self.analysis('framerate')
        fintegrate = int(round(tintegrate*fr))

        # Cut off the first number after the median first lick
        ts = np.arange(0, 2*fr+1, fintegrate)
        am = np.argmax(ts > mfl)
        if np.max(ts) > mfl and am < len(ts) - 1: ts = ts[:am+1]

        bls = self.gettrials(runs, cs, start=-1, end=0, error_trials=-1, lick=-1)
        meanbl = np.nanmean(bls, axis=1)

        vdriven = np.zeros(np.shape(bls)[0], dtype=bool)
        pval /= len(ts) - 1  # Correct for number of time points
        pval /= np.shape(bls)[0]  # Correct for the number of cells
        pval /= ncses  # Correct for number of CSes

        # We will save the maximum inverse p values
        maxinvps = np.zeros(np.shape(bls)[0], dtype=np.float64)

        for i in range(len(ts) - 1):
            start = float(ts[i])/fr
            end = float(ts[i+1])/fr
            trs = self.gettrials(runs, cs, start=start, end=end,
                                 error_trials=0, lick=100 if not nolick else -1)

            for c in range(np.shape(trs)[0]):
                if np.nanmean(trs[c, :]) > meanbl[c]:
                    pv = stats.ranksums(bls[c, :], trs[c, :]).pvalue
                    logpv = -1*np.log(pv)
                    if logpv > maxinvps[c]: maxinvps[c] = logpv
                    if pv <= pval:
                        vdriven[c] = True

        return maxinvps
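
The per-cell test in responses is a Wilcoxon rank-sum comparison of stimulus-window means against pre-stimulus baseline means, with the significance threshold divided by the number of time bins, cells, and stimuli. A minimal sketch of that test for a single cell on synthetic data; the counts used in the correction are made up:

import numpy as np
from scipy import stats

rng = np.random.RandomState(2)
baseline = rng.normal(0.00, 0.01, size=40)   # per-trial baseline means
response = rng.normal(0.03, 0.01, size=40)   # per-trial stimulus-window means

# Bonferroni-style correction as in Vdrive.responses
ntimebins, ncells, ncses = 6, 200, 3
alpha = 0.05 / ntimebins / ncells / ncses

pv = stats.ranksums(baseline, response).pvalue
print(-np.log(pv), pv <= alpha)
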
Example #12
class Repevent(base.AnalysisBase):
    requires = ['classifier']
    sets = ['repevent_%s' % cs for cs in config.stimuli()] + \
           ['repevent_inactive_frames']
    across = 'run'
    updated = '181005'

    def run(self, run):
        """
        Run all analyses and return results in a dictionary.

        Parameters
        ----------
        run : Run object

        Returns
        -------
        dict
            All of the output values

        """

        out = self.nanoutput()
        t2p = run.trace2p()
        trs = t2p.trace('deconvolved')

        if run.run_type != 'training':
            c2p = run.classify2p()
            mask = t2p.inactivity()
        else:
            c2p = self.training_classifier(run)

            # Take the times that were during stimuli or during inactivity and block them
            mask = t2p.trialmask(padpre=0.1, padpost=0.5)
            mask = np.bitwise_or(mask, t2p.inactivity(nostim=False))
            mask = np.invert(mask)

        out['repevent_inactive_frames'] = np.sum(mask)

        for cs in config.stimuli():
            if self.analysis('good_%s' % cs):
                out['repevent_%s' % cs] = c2p.events(cs,
                                                     0.1,
                                                     trs,
                                                     mask=mask,
                                                     xmask=True,
                                                     max=2,
                                                     downfor=2,
                                                     maxlen=-1)

        return out

    def training_classifier(self, run):
        """
        Get a training classifier instance

        Parameters
        ----------
        run : Run object
            The run to be classified

        Returns
        -------
        Classify2p object

        """

        # Get a classifier instance
        parameters = deepcopy(self.pars)
        parameters['training-runs'] = [
            r.run for r in run.parent.runs('training') if r.run != run.run
        ]
        parameters['comparison-run'] = run.run

        gm = run.classify2p(parameters)

        return gm
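
For training runs, the mask above blocks frames that fall inside padded stimulus presentations or periods of inactivity and keeps everything else for event detection. A small sketch of that boolean-mask logic, with hypothetical per-frame masks standing in for t2p.trialmask() and t2p.inactivity(nostim=False):

import numpy as np

nframes = 12
stim_mask = np.zeros(nframes, dtype=bool)
stim_mask[3:6] = True      # frames during padded stimulus presentations
inactive = np.zeros(nframes, dtype=bool)
inactive[8:10] = True      # frames flagged as inactive

# Block stimulus and inactive frames, keep the rest (as in the training branch)
mask = np.invert(np.bitwise_or(stim_mask, inactive))
print(mask, int(np.sum(mask)))   # frames available for replay-event detection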