예제 #1
0
def get_trials_in_recording(exp_path,
                            return_se=False,
                            ignore_dprime=False,
                            se=None,
                            suppress_dprime_error=False,
                            use_sparse=False):
    '''
    Retrieve all the trials in a recording as Trial objects

    Parameters
    ----------
    exp_path : String
        Path to the experiment folder.
    return_se : Bool, optional
        Whether to also return a StatisticExtractor object for the 
        whole experiment. The default is False.
    ignore_dprime : Bool, optional
        If True, build trials even when the recording's d-prime is
        at or below 1. The default is False.
    se : Recording, optional
        An already-built StatisticExtractor/Recording to reuse instead
        of constructing a new one. The default is None.
    suppress_dprime_error : Bool, optional
        If True, return None (or (None, None) with return_se) instead
        of raising when d-prime is at or below 1. The default is False.
    use_sparse : Bool, optional
        If True, build lightweight SparseTrial objects that do not need
        suite2p output, frame times, or lick data. The default is False.

    Returns
    -------
    [Trial] or ([Trial], StatisticExtractor)
        None (or (None, None)) when d-prime <= 1 and
        suppress_dprime_error is set.

    Raises
    ------
    ValueError
        If d-prime <= 1 and neither ignore_dprime nor
        suppress_dprime_error is set.
    '''

    #Get the appropriate paths for the timeline and the trial metadata
    files = os.listdir(exp_path)
    timeline_path = os.path.join(
        exp_path, item([s for s in files if 'Timeline.mat' in s]))
    psychstim_path = os.path.join(
        exp_path, item([s for s in files if 'psychstim.mat' in s]))

    if calc_d_prime(psychstim_path) > 1 or ignore_dprime:
        # `is None` (not `== None`) avoids invoking any __eq__ overload.
        if se is None and not use_sparse:
            se = Recording(exp_path)
        structs = _get_trial_structs(psychstim_path)
        if use_sparse:
            trials = [SparseTrial(struct, tolerant=False)
                      for struct in structs]
        else:
            #Full Trial objects need frame times and lick states.
            nframes = se.ops["nframes"]
            times = get_neural_frame_times(timeline_path, nframes)
            licks = get_lick_state_by_frame(timeline_path, times)
            trials = [Trial(exp_path, struct, se, times, licks)
                      for struct in structs]
        # NB: with use_sparse and return_se, se may still be None.
        return (trials, se) if return_se else trials
    elif suppress_dprime_error:
        return (None, None) if return_se else None
    else:
        raise ValueError("Dprime below 1")
예제 #2
0
    def __init__(self,
                 exp_path,
                 ignore_dprime=False,
                 tolerate_lack_of_eye_video=False,
                 ignore_eye_video=False,
                 tolerate_frametime_discrepancies=True):
        '''
        Load one experiment's trials, frame times, licks, and (if
        available) DeepLabCut pupil diameters as per-frame series.

        Parameters
        ----------
        exp_path : string
            Root path of the experiment folder.
        ignore_dprime : bool, optional
            Forwarded to get_trials_in_recording; if True, trials are
            built even when d-prime is at or below 1.
        tolerate_lack_of_eye_video : bool, optional
            If True, missing eyecam footage yields NaN pupil diameters
            instead of raising ValueError.
        ignore_eye_video : bool, optional
            If True, skip eye-video processing entirely and fill
            pupil_diameter with NaNs.
        tolerate_frametime_discrepancies : bool, optional
            If True, swallow ValueError from get_neural_frame_times.
        '''
        self.exp_path = exp_path
        super().__init__(exp_path)
        # Pass se=self so the trials reuse this object's suite2p state
        # rather than constructing a second Recording.
        self.trials = get_trials_in_recording(exp_path,
                                              return_se=False,
                                              se=self,
                                              ignore_dprime=ignore_dprime,
                                              suppress_dprime_error=False)
        timeline_path = os.path.join(
            exp_path,
            item([s for s in os.listdir(exp_path) if 'Timeline.mat' in s]))

        try:
            self.frame_times = get_neural_frame_times(timeline_path,
                                                      self.ops["nframes"],
                                                      tolerant=False)
        except ValueError as e:
            # NOTE(review): when the discrepancy is tolerated,
            # self.frame_times is never assigned here, so the
            # get_lick_state_by_frame call below will fail unless the
            # base class set it — confirm intended.
            if tolerate_frametime_discrepancies:
                pass
            else:
                raise e

        #Now we need to get these all as continuous time series.
        (self.trialtime, self.iscorrect, self.side, self.isgo, self.contrast,
         self.trial_id, self.trial_num, self.peritrialtime,
         self.trial_component) = self.get_timeseries()

        self.licks = get_lick_state_by_frame(timeline_path, self.frame_times)
        #This licks attr is the bool series, we want the deltaT series
        self.licks = lick_transform(self.licks)

        #Need to check if this experiment had a video that was processed
        #by our DeepLabCut Network...
        path, exp_id = os.path.split(self.exp_path)
        # First .h5 file whose name contains this experiment id wins;
        # the for/else runs the else branch only if we never break.
        for file in os.listdir(DLC_ANALYSED_VIDEOS_DIRECTORY):
            if exp_id in file and '.h5' in file:
                hdf_path = os.path.join(DLC_ANALYSED_VIDEOS_DIRECTORY, file)
                if not ignore_eye_video:
                    self.pupil_diameter = get_eye_diameter_at_timepoints(
                        hdf_path, timeline_path, self.frame_times)
                    break
        else:
            if not ignore_eye_video and not tolerate_lack_of_eye_video:
                raise ValueError(
                    f"No associated eyecam footage found at {DLC_ANALYSED_VIDEOS_DIRECTORY}"
                )
            # No footage (or deliberately ignored): NaN per neural frame.
            self.pupil_diameter = [np.nan] * self.ops["nframes"]
    def __init__(self, exp_path, merge=True):
        '''
        A nice heatmap figure for all
        neurons' responses over the course of a trial.

        Parameters
        ----------
        exp_path : string
            The root path of an experiment.
        merge : bool, optional
            Accepted for interface compatibility; not read here.
        '''
        seaborn.set_style("dark")
        print(f"Loading data from {exp_path}")
        self.trials, self.recording = get_trials_in_recording(exp_path,
                                                              return_se=True)
        print("Running rastermap on fluorescence data")
        rmap = Rastermap()

        print("Sorting traces by rastermap ordering")
        #Rastermap cannot cope with NaNs, so patch them in place, then
        #reorder the traces by the fitted 1D embedding.
        traces = self.recording.dF_on_F
        traces[np.isnan(traces)] = 1
        rmap.fit(traces)
        self.dF_on_F = traces[rmap.isort]

        #Locate the Timeline file for this experiment
        candidates = [f for f in os.listdir(exp_path) if 'Timeline.mat' in f]
        timeline_path = os.path.join(exp_path, item(candidates))

        print("Fetching Frame Times...")
        n_frames = self.recording.ops["nframes"]
        times = get_neural_frame_times(timeline_path, n_frames)
        print("Fetching licking information...")
        self.licks = get_lick_state_by_frame(timeline_path, times)
        print("Aligning frames with trials...")
        self.start_idxs, self.end_idxs = self.get_trial_attributes(times)
        print("...done")
    def __init__(self, exp_path):
        '''
        Load a recording (accepting any d-prime) and order its raw
        fluorescence traces by a rastermap embedding.

        Parameters
        ----------
        exp_path : string
            The root path of an experiment.
        '''
        seaborn.set_style("dark")
        print(f"Loading data from {exp_path}")
        self.trials, self.recording = get_trials_in_recording(
            exp_path, return_se=True, ignore_dprime=True)
        print("Running rastermap on fluorescence data")
        rmap = Rastermap()
        rmap.fit(self.recording.F)
        print("Sorting traces by rastermap ordering")
        #NB: despite the attribute name, this holds raw F sorted by
        #the rastermap embedding.
        self.dF_on_F = self.recording.F[rmap.isort]

        #Locate the Timeline file for this experiment
        candidates = [f for f in os.listdir(exp_path) if 'Timeline.mat' in f]
        timeline_path = os.path.join(exp_path, item(candidates))

        print("Fetching Frame Times...")
        times = get_neural_frame_times(timeline_path,
                                       self.recording.ops["nframes"])
        print("Fetching licking information...")
        self.licks = get_lick_state_by_frame(timeline_path, times)
        print("Aligning frames with trials...")
        self.start_idxs, self.end_idxs = self.get_trial_attributes(times)
        print("...done")
예제 #5
0
def get_psychstim_path(exp_path):
    '''Return the full path of the unique psychstim file in exp_path.'''
    matches = [f for f in listdir(exp_path) if "psychstim" in f]
    return join(exp_path, item(matches))
예제 #6
0
def get_timeline_path(exp_path):
    '''Return the full path of the unique Timeline file in exp_path.'''
    matches = [f for f in listdir(exp_path) if "Timeline" in f]
    return join(exp_path, item(matches))