Code example #1
0
    def __init__(self, eyecam_video_file, behav_video_file,
                 matlab_timeline_file, dlc_h5_file, exp_path):
        """Assemble the state for a 2x3 synchronised-playback figure.

        Parameters
        ----------
        eyecam_video_file : path to the eye-camera video (stored only;
            not opened here).
        behav_video_file : path to the behaviour-camera video (stored
            only; not opened here).
        matlab_timeline_file : MATLAB Timeline file used to derive
            eyecam frame times, lick states, and neural frame times.
        dlc_h5_file : DeepLabCut output (.h5) giving per-frame pupil size.
        exp_path : experiment root folder, forwarded to
            get_trials_in_recording.
        """
        self.eyecam_video_file = eyecam_video_file
        self.behav_video_file = behav_video_file
        # 2x3 panel grid: (behaviour video, eye video, pupil size) on the
        # top row; (trial state, licks, neuropil) on the bottom row.
        self.fig, axes = plt.subplots(nrows=2, ncols=3)

        ((self.behavvid_axis, self.eyevid_axis, self.pupilsize_axis),
         (self.trial_axis, self.lick_axis, self.neuropil_axis)) = axes

        self.all_axes = axes.flatten()

        self.frame_times = get_eyecam_frame_times(matlab_timeline_file)
        self.pupil_sizes = get_pupil_size_at_each_eyecam_frame(dlc_h5_file)
        # Truncate frame times to the pupil-series length so the two align
        # one-to-one (presumably the eyecam recorded a few frames beyond
        # the DLC-analysed range -- TODO confirm).
        self.frame_times = self.frame_times[:self.pupil_sizes.shape[0]]
        self.licks = get_lick_state_by_frame(matlab_timeline_file,
                                             self.frame_times)

        self.trials, self.recording = get_trials_in_recording(
            exp_path, ignore_dprime=True, return_se=True)

        # Neuropil traces resampled to the eyecam frame times, reshaped
        # into KxK squares for imshow display.
        self.neuropil_traces = self.get_neuropil_for_each_eyecam_frame(
            matlab_timeline_file)

        nframes = self.recording.ops['nframes']
        neural_frame_times = get_neural_frame_times(matlab_timeline_file,
                                                    nframes)
        self.neural_frame_times = neural_frame_times

        self.trial_state = self.get_trial_state_for_each_eyecam_frame()
Code example #2
0
def get_trials_in_recording(exp_path,
                            return_se=False,
                            ignore_dprime=False,
                            se=None,
                            suppress_dprime_error=False,
                            use_sparse=False):
    '''
    Retrieve all the trials in a recording as Trial objects.

    Parameters
    ----------
    exp_path : String
        Path to the experiment folder.
    return_se : Bool, optional
        Whether to also return a StatisticExtractor object for the 
        whole experiment. The default is False.
    ignore_dprime : Bool, optional
        Build trials even when the recording's d-prime is at or
        below 1. The default is False.
    se : Recording, optional
        A pre-built extractor to reuse; when None (and use_sparse is
        False) a Recording is constructed from exp_path.
    suppress_dprime_error : Bool, optional
        When d-prime is too low, return None instead of raising.
        The default is False.
    use_sparse : Bool, optional
        Build lightweight SparseTrial objects instead of Trials,
        skipping the suite2p frame/lick lookups. The default is False.

    Returns
    -------
    [Trial] or ([Trial], StatisticExtractor)

    Raises
    ------
    ValueError
        If d-prime is at or below 1 and neither ignore_dprime nor
        suppress_dprime_error is set.
    '''

    #Get the appropriate paths for the timeline and the trial metadata
    files = os.listdir(exp_path)
    timeline_path = os.path.join(
        exp_path, item([s for s in files if 'Timeline.mat' in s]))
    psychstim_path = os.path.join(
        exp_path, item([s for s in files if 'psychstim.mat' in s]))
    trials = []
    if calc_d_prime(psychstim_path) > 1 or ignore_dprime:
        #Identity check, not equality: Recording may define __eq__,
        #and `se == None` would invoke it.
        if se is None and not use_sparse:
            se = Recording(exp_path)
        #We need the total number of frames:
        structs = _get_trial_structs(psychstim_path)
        if not use_sparse:
            nframes = se.ops["nframes"]
            times = get_neural_frame_times(timeline_path, nframes)
            licks = get_lick_state_by_frame(timeline_path, times)

        for struct in structs:
            if use_sparse:
                trial = SparseTrial(struct, tolerant=False)
            else:
                trial = Trial(exp_path, struct, se, times, licks)
            trials.append(trial)
        return trials if not return_se else (trials, se)
    elif suppress_dprime_error:
        return None if not return_se else (None, None)
    else:
        raise ValueError("Dprime below 1")
Code example #3
0
    def __init__(self,
                 exp_path,
                 ignore_dprime=False,
                 tolerate_lack_of_eye_video=False,
                 ignore_eye_video=False,
                 tolerate_frametime_discrepancies=True):
        """Load a recording plus its trials, frame times, licks, and
        (when available) DeepLabCut pupil diameters.

        Parameters
        ----------
        exp_path : string
            Root path of the experiment folder.
        ignore_dprime : bool, optional
            Forwarded to get_trials_in_recording. Default False.
        tolerate_lack_of_eye_video : bool, optional
            When no DLC-analysed eye video exists, fill pupil_diameter
            with NaNs instead of raising. Default False.
        ignore_eye_video : bool, optional
            Skip eye-video processing entirely (pupil_diameter becomes
            all-NaN). Default False.
        tolerate_frametime_discrepancies : bool, optional
            Retry frame-time extraction in tolerant mode if the strict
            pass raises. Default True.
        """
        self.exp_path = exp_path
        super().__init__(exp_path)
        self.trials = get_trials_in_recording(exp_path,
                                              return_se=False,
                                              se=self,
                                              ignore_dprime=ignore_dprime,
                                              suppress_dprime_error=False)
        timeline_path = os.path.join(
            exp_path,
            item([s for s in os.listdir(exp_path) if 'Timeline.mat' in s]))

        try:
            self.frame_times = get_neural_frame_times(timeline_path,
                                                      self.ops["nframes"],
                                                      tolerant=False)
        except ValueError:
            if tolerate_frametime_discrepancies:
                # Bug fix: the previous code just `pass`ed here, leaving
                # self.frame_times unset, so the get_lick_state_by_frame
                # call below raised AttributeError.  Retry in tolerant
                # mode instead (TODO confirm tolerant=True resolves the
                # discrepancy for the affected recordings).
                self.frame_times = get_neural_frame_times(
                    timeline_path, self.ops["nframes"], tolerant=True)
            else:
                # Bare raise preserves the original traceback.
                raise

        #Now we need to get these all as continuous time series.
        (self.trialtime, self.iscorrect, self.side, self.isgo, self.contrast,
         self.trial_id, self.trial_num, self.peritrialtime,
         self.trial_component) = self.get_timeseries()

        self.licks = get_lick_state_by_frame(timeline_path, self.frame_times)
        #This licks attr is the bool series, we want the deltaT series
        self.licks = lick_transform(self.licks)

        #Need to check if this experiment had a video that was processed
        #by our DeepLabCut Network...
        path, exp_id = os.path.split(self.exp_path)
        for file in os.listdir(DLC_ANALYSED_VIDEOS_DIRECTORY):
            if exp_id in file and '.h5' in file:
                hdf_path = os.path.join(DLC_ANALYSED_VIDEOS_DIRECTORY, file)
                if not ignore_eye_video:
                    self.pupil_diameter = get_eye_diameter_at_timepoints(
                        hdf_path, timeline_path, self.frame_times)
                    break
        else:
            #for/else: we only get here if no matching file broke the loop.
            if not ignore_eye_video and not tolerate_lack_of_eye_video:
                raise ValueError(
                    f"No associated eyecam footage found at {DLC_ANALYSED_VIDEOS_DIRECTORY}"
                )
            self.pupil_diameter = [np.nan] * self.ops["nframes"]
0
    def get_neuropil_for_each_eyecam_frame(self, matlab_timeline_file):
        '''
        Get the neuropil brightness at the time each eyecam frame was
        captured.

        For every eyecam frame we find the nearest neural frame in time,
        sample the neuropil traces there, and fold the ROI axis into a
        KxK square so the result can be shown with imshow.

        Parameters
        ----------
        matlab_timeline_file : str
            Timeline file used to compute the neural frame times.

        Returns
        -------
        neuropil_series : array of float, shape (timepoints, K, K)
            Neuropil traces reshaped into KxK squares per timepoint.

        '''
        # Map each eyecam frame onto the index of the neural frame whose
        # capture time is closest, so the two series line up.
        n_neural = self.recording.ops['nframes']
        neural_times = get_neural_frame_times(matlab_timeline_file,
                                              n_neural)
        nearest_idx = get_nearest_frame_to_each_timepoint(
            neural_times, self.frame_times)
        self.times_of_closest_neural = neural_times[nearest_idx]

        # Neuropil fluorescence sampled at each eyecam frame's moment.
        traces = self.recording.Fneu[:, nearest_idx]

        # Fold the ROI axis into the largest square that fits: take the
        # integer square root, drop the leftover ROIs, then reshape.
        n_rois, _ = traces.shape
        side = int(np.floor(np.sqrt(n_rois)))
        traces = traces[:side * side, :]
        squares = traces.reshape(side, side, -1)
        # Move time to the leading axis: (timepoints, K, K) for imshow.
        return squares.transpose(2, 0, 1)
Code example #5
0
    def __init__(self, exp_path, merge=True):
        '''
        A nice heatmap figure for all
        neurons' responses over the course of a trial.
        

        Parameters
        ----------
        exp_path : string
            The root path of an experiment.
        merge : bool, optional
            NOTE(review): not used anywhere in this constructor --
            confirm whether it is consumed elsewhere. Default True.

        '''
        seaborn.set_style("dark")
        print(f"Loading data from {exp_path}")
        self.trials, self.recording = get_trials_in_recording(exp_path,
                                                              return_se=True)
        print("Running rastermap on fluorescence data")
        r = Rastermap()

        #Sort by rastermap embedding
        print("Sorting traces by rastermap ordering")
        self.dF_on_F = self.recording.dF_on_F
        # NOTE(review): this is the same array object as
        # self.recording.dF_on_F, so the NaN replacement below mutates
        # the recording's data in place -- confirm this is intended.
        self.dF_on_F[np.isnan(self.dF_on_F)] = 1
        r.fit(self.dF_on_F)
        # Reorder rows by the rastermap embedding so similar traces
        # are adjacent in the heatmap.
        self.dF_on_F = self.dF_on_F[r.isort]

        #Show overall plot
        timeline_path = os.path.join(
            exp_path,
            item([s for s in os.listdir(exp_path) if 'Timeline.mat' in s]))

        print("Fetching Frame Times...")
        frame_times = get_neural_frame_times(timeline_path,
                                             self.recording.ops["nframes"])
        print("Fetching licking information...")
        self.licks = get_lick_state_by_frame(timeline_path, frame_times)
        print("Aligning frames with trials...")
        self.start_idxs, self.end_idxs = self.get_trial_attributes(frame_times)
        print("...done")
Code example #6
0
    def __init__(self, exp_path):
        """Load a recording (ignoring d-prime), sort its raw fluorescence
        traces by rastermap embedding, and align frames with trials.

        Parameters
        ----------
        exp_path : string
            The root path of an experiment.
        """
        seaborn.set_style("dark")
        print(f"Loading data from {exp_path}")
        self.trials, self.recording = get_trials_in_recording(
            exp_path, return_se=True, ignore_dprime=True)
        print("Running rastermap on fluorescence data")
        r = Rastermap()
        r.fit(self.recording.F)
        #Sort by rastermap embedding
        print("Sorting traces by rastermap ordering")
        # NOTE(review): despite the attribute name, this holds the raw
        # fluorescence F (also what rastermap was fitted on above), not
        # dF/F -- confirm whether recording.dF_on_F was intended.
        self.dF_on_F = self.recording.F[r.isort]
        timeline_path = os.path.join(
            exp_path,
            item([s for s in os.listdir(exp_path) if 'Timeline.mat' in s]))

        print("Fetching Frame Times...")
        frame_times = get_neural_frame_times(timeline_path,
                                             self.recording.ops["nframes"])
        print("Fetching licking information...")
        self.licks = get_lick_state_by_frame(timeline_path, frame_times)
        print("Aligning frames with trials...")
        self.start_idxs, self.end_idxs = self.get_trial_attributes(frame_times)
        print("...done")
from accdatatools.Utils.path import get_exp_path
from accdatatools.Observations.recordings import Recording
from accdatatools.Timing.synchronisation import (get_neural_frame_times,
                                                 get_lick_state_by_frame)

# Script: display the dF/F traces most correlated with licking for one
# hard-coded experiment on drive H:.
experiment_ID = "2016-10-07_03_CFEB027"
experiment_path = get_exp_path(experiment_ID, "H:\\")

suite2p_path = os.path.join(experiment_path, "suite2p", "plane0")

timeline_path = os.path.join(experiment_path,
                             "2016-10-07_03_CFEB027_Timeline.mat")

exp_recording = Recording(suite2p_path)

# One timestamp per neural (two-photon) frame.
frame_times = get_neural_frame_times(timeline_path,
                                     exp_recording.ops["nframes"])

# Per-frame lick state aligned to the neural frame times.
licking = get_lick_state_by_frame(timeline_path, frame_times)

# Pearson correlation of each ROI's dF/F trace with the lick series.
corrs = [pearsonr(x, licking)[0] for x in exp_recording.dF_on_F]
# Ascending sort, so the most lick-correlated ROIs end up last.
corrs_isort = np.argsort(corrs)
to_plot = exp_recording.dF_on_F[corrs_isort]
fig, ax = plt.subplots()
lick_frame = np.nonzero(licking)
# Clip the display range at the 99th percentile so a few bright
# transients don't wash out the heatmap.
max_brightness = np.percentile(to_plot, 99)
ax.imshow(np.clip(to_plot[-20:], -0.2, max_brightness),
          origin='lower',
          aspect=5)
# Tick marks just below the heatmap rows at every lick frame.
ax.vlines(lick_frame, -15, -10)
ax.set_xlim(0, 1000)
fig.show()