Example no. 1
0
def plot_lick_triggered_fr(obj,
                           spikes,
                           axis=None,
                           min_inter_lick_time=0.5,
                           preTime=1,
                           postTime=2,
                           plot=True,
                           sdfSigma=0.01,
                           returnSDF=False):
    """Plot and/or return lick-triggered spike density functions (SDFs).

    Licks are grouped into bouts; the first lick of each bout (preceded by
    at least ``min_inter_lick_time`` seconds of no licking) is the trigger.
    Triggers are split by trial outcome into "hit" vs "aborted/false alarm".

    Parameters
    ----------
    obj : experiment object exposing frameAppearTimes, trials, syncDataset
        and the hit/earlyResponse/falseAlarm trial masks
    spikes : spike times passed through to analysis_utils.getSDF
    axis : matplotlib axis to draw into; a new figure is made if None
    min_inter_lick_time : minimum inter-lick gap (s) defining a bout onset
    preTime, postTime : window (s) before/after each trigger lick
    plot : if True, draw the two SDF traces with a legend
    sdfSigma : gaussian sigma (s) for the SDF
    returnSDF : if True, return (hit_psth, bad_psth); otherwise None
    """
    if axis is None and plot:
        fig, axis = plt.subplots()

    frameTimes = obj.frameAppearTimes

    # trial start/end converted from frames to sync time
    trial_start_frames = np.array(obj.trials['startframe'])
    trial_end_frames = np.array(obj.trials['endframe'])
    trial_start_times = frameTimes[trial_start_frames]
    trial_end_times = frameTimes[trial_end_frames]

    # first lick of each bout; np.insert(..., 0, True) keeps the very first
    # lick, which has no preceding inter-lick interval
    lick_times = probeSync.get_sync_line_data(obj.syncDataset,
                                              'lick_sensor')[0]
    first_lick_times = lick_times[np.insert(
        np.diff(lick_times) >= min_inter_lick_time, 0, True)]
    first_lick_trials = analysis_utils.get_trial_by_time(
        first_lick_times, trial_start_times, trial_end_times)

    hit = obj.hit
    earlyResponse = obj.earlyResponse
    falseAlarm = obj.falseAlarm

    # split bout-onset licks by the outcome of the trial they fall in
    hit_lick_times = first_lick_times[np.where(hit[first_lick_trials])[0]]
    bad_lick_times = first_lick_times[np.where(
        falseAlarm[first_lick_trials] | earlyResponse[first_lick_trials])[0]]

    hit_psth, t = analysis_utils.getSDF(spikes,
                                        hit_lick_times - preTime,
                                        preTime + postTime,
                                        sigma=sdfSigma)
    bad_psth, t = analysis_utils.getSDF(spikes,
                                        bad_lick_times - preTime,
                                        preTime + postTime,
                                        sigma=sdfSigma)

    if plot:
        # FIX: x-axis was hard-coded as t - 1, which is only correct for the
        # default preTime=1; use t - preTime so time 0 is always the lick.
        # Handles renamed (hitLine/badLine) so the hit mask isn't clobbered.
        hitLine, = axis.plot(t - preTime, hit_psth, 'k')
        badLine, = axis.plot(t - preTime, bad_psth, 'r')
        axis.legend((hitLine, badLine), ('hit', 'aborted/FA'),
                    loc='best',
                    prop={'size': 8})
        formatFigure(plt.gcf(),
                     axis,
                     xLabel='Time to lick (s)',
                     yLabel='Lick-Trig. FR (Hz)')
        # dashed vertical line at the lick time
        axis.plot([0, 0], axis.get_ylim(), 'k--')

    if returnSDF:
        return hit_psth, bad_psth
Example no. 2
0
    def getFrameTimes(self):
        """Get stimulus frame times from the sync file.

        Sets self.vsyncTimes (vsync falling-edge times, used for all data
        streams) and self.frameAppearTimes (vsyncTimes plus monitor lag,
        used only for stimulus appearance times).
        """
        frameRising, frameFalling = probeSync.get_sync_line_data(
            self.syncDataset, 'stim_vsync')

        # FIX (consistent with the later version of this method): some
        # experiments have an extra frameFalling at the beginning with no
        # corresponding frame in the behavior pkl, probably because the DAQ
        # line starts high and is reinitialized to zero before psychopy's
        # normal vsyncs start -- drop that spurious first edge.
        self.vsyncTimes = frameFalling[
            1:] if frameFalling[0] < frameRising[0] else frameFalling

        # use vsyncTimes for all data streams except the stimulus frame
        # times, which are subject to monitor lag
        monitorLag = 0.036
        self.frameAppearTimes = self.vsyncTimes + monitorLag
Example no. 3
0
    def getEyeTrackingData(self):
        """Load eye-camera frame times and, if present, eye-tracking results.

        Sets self.eyeFrameTimes from the sync file. If an eyetrack-analysis
        hdf5 file exists under the 'cameras' subdirectory, opens it as
        self.eyeData and pulls pupilArea, pupilX, negSaccades and posSaccades
        into arrays; otherwise self.eyeData is None.
        """
        # eye-camera exposure event times from the sync file
        self.eyeFrameTimes = probeSync.get_sync_line_data(
            self.syncDataset, 'cam2_exposure')[0]

        # eye-tracking analysis output file, if one was produced
        self.eyeDataPath = glob.glob(
            os.path.join(self.dataDir, 'cameras', '*_eyetrack_analysis.hdf5'))
        if not self.eyeDataPath:
            self.eyeData = None
        else:
            self.eyeData = h5py.File(self.eyeDataPath[0])
            # read each dataset fully into memory as an attribute
            for datasetName in ('pupilArea', 'pupilX',
                                'negSaccades', 'posSaccades'):
                setattr(self, datasetName, self.eyeData[datasetName][:])
Example no. 4
0
    def getFrameTimes(self):
        """Get stimulus frame times from the sync file.

        Sets self.vsyncTimes (vsync falling-edge times, used for all data
        streams) and self.frameAppearTimes (vsyncTimes plus monitor lag,
        used only for stimulus appearance times).
        """
        frameRising, frameFalling = probeSync.get_sync_line_data(
            self.syncDataset, 'stim_vsync')

        # Some experiments appear to have an extra frameFalling at the start
        # with no corresponding frame in the behavior pkl file; this is
        # probably the DAQ line starting high and being reinitialized to zero
        # a few seconds before psychopy and the normal vsyncs begin. A
        # falling edge before the first rising edge marks that case.
        if frameFalling[0] < frameRising[0]:
            self.vsyncTimes = frameFalling[1:]
        else:
            self.vsyncTimes = frameFalling

        # vsyncTimes serve every data stream except the stimulus frame
        # times, which additionally suffer the monitor's display lag
        monitorLag = 0.036
        self.frameAppearTimes = self.vsyncTimes + monitorLag
Example no. 5
0
#    ax.plot(time,d,'k')
#    for side in ('right','top'):
#        ax.spines[side].set_visible(False)
#    ax.tick_params(direction='out',top=False,right=False,labelsize=12)
##    ax.set_xlim([44.2,46])
##    ax.set_ylim([0,3])
#    ax.set_xlabel('Time (s)',fontsize=14)
#    ax.set_ylabel(lbl,fontsize=14)
#    plt.tight_layout()


# get frame times and detected licks from sync file
syncFile = fileIO.getFile('choose sync file',defaultDir)
syncDataset = sync.Dataset(syncFile)

# behavior-camera exposure event times from the sync file
camFrameTimes = probeSync.get_sync_line_data(syncDataset,'cam1_exposure')[0]

# lick-sensor event times, restricted to the span covered by camera frames
detectedLickTimes = probeSync.get_sync_line_data(syncDataset, 'lick_sensor')[0]
detectedLickTimes = detectedLickTimes[(detectedLickTimes>camFrameTimes[0]) & (detectedLickTimes<camFrameTimes[-1])]
# camera frame index at (or just after) each detected lick time
detectedLickFrames = np.searchsorted(camFrameTimes,detectedLickTimes)

#camFile = fileIO.getFile('choose camera metadata file')
#camData = h5py.File(camFile,'r')
#frameIntervals = camData['frame_intervals'][:]
#camData.close()


# resample analog signals to frames
# NOTE(review): analogData and sampRate are assumed to be defined earlier in
# this file -- they are not visible in this chunk; confirm before reuse
camExp = np.array(analogData[' Dev2/ai1'])
# first analog sample where the signal goes high (>2.5 V threshold --
# presumably a camera-exposure TTL; verify against the DAQ channel map)
firstAnalogFrame = np.where(camExp[1:]>2.5)[0][0]
# analog sample index of each camera frame: cumulative inter-frame intervals
# converted to samples and offset by the first high sample
camFrameSamples = firstAnalogFrame+np.concatenate(([0],np.round(np.cumsum(np.diff(camFrameTimes))*sampRate).astype(int)))
Example no. 6
0

#
# Collect sync files one at a time until the user cancels the file dialog
# (fileIO.getFile returns '' on cancel).
syncFiles = []
while True:
    selection = fileIO.getFile('choose sync file',fileType='*.h5')
    if selection == '':
        break
    syncFiles.append(selection)

ledOnset = []
for i,(f,obj) in enumerate(zip(syncFiles,exps)):
    syncDataset = sync.Dataset(f)
    
    frameRising, frameFalling = probeSync.get_sync_line_data(syncDataset, 'vsync_stim')
    vsyncTimes = frameFalling[1:] if frameFalling[0] < frameRising[0] else frameFalling
    frameAppearTimes = vsyncTimes + obj.monitorLag/obj.frameRate
    
    binWidth = 0.001
    for laserInd,ch in enumerate((11,1)):
        laserRising,laserFalling = probeSync.get_sync_line_data(syncDataset,channel=ch)
        if len(laserRising)>0:
            laserTrials = obj.laser==laserInd
            ct = frameAppearTimes[obj.changeFrames[laserTrials & (obj.changeTrials | obj.catchTrials)].astype(int)]
            fig = plt.figure(figsize=(6,6))
            for j,(t,xlbl) in enumerate(zip((laserRising,laserFalling),('onset','offset'))):
                timeFromChange = t[~obj.laserOnBeforeAbort[laserTrials]]-ct
                if xlbl=='onset':
                    ledOnset.append(timeFromChange)
                ax = fig.add_subplot(2,1,j+1)
Example no. 7
0
    def getBehaviorData(self):
        """Load the behavior pkl file and align behavior events to sync time.

        Builds the extended trials dataframe and derives, as attributes on
        self: flash/change/image bookkeeping, omitted-flash info, trial
        start/end times, trial outcome masks, running data, lick times and
        reward times. Requires frame times from the sync file and runs
        getFrameTimes() first if needed. Returns None.
        """
        # frame times are needed to convert behavior frames to seconds
        if not hasattr(self, 'vsyncTimes'):
            self.getFrameTimes()

        # prefer a replay-session behavior pkl; otherwise fall back to the
        # first pkl in the data directory whose name ends in a digit
        pkl = glob.glob(
            os.path.join(self.dataDir, '*replay-session*behavior*.pkl'))
        if len(pkl) > 0:
            self.pkl_file = pkl[0]
        else:
            self.pkl_file = glob.glob(os.path.join(self.dataDir,
                                                   '*[0-9].pkl'))[0]
        behaviordata = pd.read_pickle(self.pkl_file)
        self.core_data = data_to_change_detection_core(behaviordata)

        # extended per-trial dataframe assembled from the core behavior data
        self.trials = create_extended_dataframe(
            trials=self.core_data['trials'],
            metadata=self.core_data['metadata'],
            licks=self.core_data['licks'],
            time=self.core_data['time'])

        self.behaviorVsyncCount = self.core_data[
            'time'].size  # same as self.trials['endframe'].values[-1] + 1

        # stimulus flash and image-change bookkeeping (all in behavior frames)
        self.flashFrames = np.array(self.core_data['visual_stimuli']['frame'])
        self.flashImage = self.core_data['visual_stimuli']['image_name']
        self.changeFrames = np.array(self.trials['change_frame']).astype(
            int) + 1  #add one to correct for change frame indexing problem
        self.initialImage = np.array(self.trials['initial_image_name'])
        self.changeImage = np.array(self.trials['change_image_name'])

        # image set: full-size images, 10x-downsampled copies, decoded names
        self.images = self.core_data['image_set']['images']
        newSize = tuple(int(s / 10) for s in self.images[0].shape[::-1])
        self.imagesDownsampled = [
            cv2.resize(img, newSize, interpolation=cv2.INTER_AREA)
            for img in self.images
        ]
        self.imageNames = [
            str(i['image_name'], 'utf-8')
            for i in self.core_data['image_set']['image_attributes']
        ]

        # omitted flashes: keep only candidates that were genuinely not drawn
        # according to the draw log
        candidateOmittedFlashFrames = behaviordata['items']['behavior'][
            'stimuli']['images']['flashes_omitted']
        drawlog = behaviordata['items']['behavior']['stimuli']['images'][
            'draw_log']
        self.omittedFlashFrames = np.array(
            [c for c in candidateOmittedFlashFrames if not drawlog[c]])
        # index of the flash shown immediately before each omitted flash
        imageFrameIndexBeforeOmitted = np.searchsorted(
            self.core_data['visual_stimuli']['frame'],
            self.omittedFlashFrames) - 1
        self.omittedFlashImage = np.array(
            self.core_data['visual_stimuli']
            ['image_name'])[imageFrameIndexBeforeOmitted]

        self.behaviorStimDur = np.array(
            self.core_data['visual_stimuli']['duration'])
        self.preGrayDur = np.stack(
            self.trials['blank_duration_range'])  # where is actual gray dur
        # sync time (monitor-lag corrected) of the last behavior frame
        self.lastBehaviorTime = self.frameAppearTimes[
            self.trials['endframe'].values[-1]]

        # align trials to sync
        self.trial_start_frames = np.array(self.trials['startframe'])
        self.trial_end_frames = np.array(self.trials['endframe'])
        self.trial_start_times = self.frameAppearTimes[self.trial_start_frames]
        self.trial_end_times = self.frameAppearTimes[self.trial_end_frames]

        # trial outcome masks (one boolean per trial)
        self.autoRewarded = np.array(self.trials['auto_rewarded']).astype(bool)
        self.earlyResponse = np.array(
            self.trials['response_type'] == 'EARLY_RESPONSE')
        # trials to exclude downstream: early responses and auto-rewards
        self.ignore = self.earlyResponse | self.autoRewarded
        self.miss = np.array(self.trials['response_type'] == 'MISS')
        self.hit = np.array(self.trials['response_type'] == 'HIT')
        self.falseAlarm = np.array(self.trials['response_type'] == 'FA')
        self.correctReject = np.array(self.trials['response_type'] == 'CR')

        # get running data, aligned to vsync times
        self.behaviorRunTime = self.vsyncTimes[self.core_data['running'].frame]
        self.behaviorRunSpeed = self.core_data['running'].speed
        self.behaviorRunDx = self.core_data['running'].dx

        # get run start times
        self.behaviorRunStartTimes = find_run_transitions(
            self.behaviorRunSpeed, self.behaviorRunTime)

        # get lick times from sync channel 31; if none were recorded there,
        # fall back to the lick frames stored in the trials dataframe
        self.lickTimes = probeSync.get_sync_line_data(self.syncDataset,
                                                      channel=31)[0]
        if len(self.lickTimes) == 0:
            self.lickTimes = self.vsyncTimes[np.concatenate(
                [f for f in self.trials['lick_frames']]).astype(int)]

        # get reward times for all trials (nan for non-rewarded trials)
        rewardTimes = []
        for f in self.trials['reward_frames']:
            if len(f) > 0:
                rewardTimes.append(self.vsyncTimes[f[0]])
            else:
                rewardTimes.append(np.nan)
        self.rewardTimes = np.array(rewardTimes)