Example 1
def clipWave(W, freq, time, freqRange, timeRange, coi=[]):
    '''
    W_clip, freq_clip, time_clip, coi_clip = clipWave(W, freq, time, freqRange, \
        timeRange, coi=[])
    Clips the matrix of wavelet coefficients to the specified freq and time
        range
    Inputs:
    W - 2D matrix of wavelet coefficients
    freq - Freq vector of length = number of rows of W
    time - Time vector of length = number of cols of W
    freqRange - 2 element array specifying the min and max freq to clip W to
    timeRange - 2 element array specifying the min and max time to clip W to
    coi - Cone of influence; can be empty
    Outputs:
    W_clip - Clipped W
    freq_clip - Clipped freq
    ...
    '''
    import apCode.SignalProcessingTools as spt
    # Indices of the freq and time values closest to the requested ranges
    fInds = spt.nearestMatchingInds(freqRange, freq)
    print(fInds)
    tInds = spt.valsToNearestInds(timeRange, time)
    # fInds are used in reverse order because freq is assumed to be in
    # descending order (highest frequency in the first row of W)
    W_clip = W[fInds[1]:fInds[0], tInds[0]:tInds[1]]
    freq_clip = freq[fInds[1]:fInds[0]]
    time_clip = time[tInds[0]:tInds[1]]
    if len(coi) > 0:
        coi_clip = coi[tInds[0]:tInds[1]]
    else:
        coi_clip = coi
    return W_clip, freq_clip, time_clip, coi_clip
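clipWave relies on index-matching helpers from apCode.SignalProcessingTools, which are not shown here. A rough, dependency-free sketch of the same idea (nearest-index lookup followed by slicing, assuming a descending frequency axis; nearest_inds is a hypothetical stand-in, not the apCode implementation):

import numpy as np

def nearest_inds(vals, ref):
    # Hypothetical stand-in for spt.nearestMatchingInds / spt.valsToNearestInds:
    # index of the closest value in `ref` for each entry of `vals`.
    return np.array([np.argmin(np.abs(ref - v)) for v in np.atleast_1d(vals)])

def clip_wave_numpy(W, freq, time, freqRange, timeRange):
    fInds = nearest_inds(freqRange, freq)  # freq assumed descending, so fInds[1] < fInds[0]
    tInds = nearest_inds(timeRange, time)
    W_clip = W[fInds[1]:fInds[0], tInds[0]:tInds[1]]
    return W_clip, freq[fInds[1]:fInds[0]], time[tInds[0]:tInds[1]]

# Synthetic example: 64 frequencies (descending) x 1000 time points
freq = np.linspace(100, 1, 64)
time = np.linspace(0, 10, 1000)
W = np.random.randn(freq.size, time.size)
W_clip, f_clip, t_clip = clip_wave_numpy(W, freq, time, (5, 50), (2, 8))
print(W_clip.shape, f_clip[[0, -1]], t_clip[[0, -1]])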
Example 3
def plotWave(W,
             freq,
             time,
             coi=[],
             powScale='log',
             cmap='coolwarm',
             xlabel='Time (sec)',
             ylabel='Freq (Hz)'):
    '''
    fh = plotWave(W,freq,time,...)
    Plots the matrix of wavelet coefficients W, using the specified freq and time axes
    '''
    import numpy as np
    import apCode.SignalProcessingTools as spt
    import matplotlib.pyplot as plt
    # Display |W| on a log2 scale unless a linear power scale is requested
    if powScale.lower() == 'log':
        W = np.log2(np.abs(W))
    else:
        W = np.abs(W)
    period = 1 / freq
    dt = time[1] - time[0]
    # Pad the time vector by half a sample at both ends so that the cone of
    # influence (COI) outline closes at the edges of the image
    tt = np.hstack((time[[0, 0]] - dt * 0.5, time, time[[-1, -1]] + dt * 0.5))
    if len(coi) == 0:
        coi = time
    coi_ext = np.log2(np.hstack((freq[[-1, 1]], 1 / coi, freq[[1, -1]])))
    # Convert the COI outline from (time, freq) values to pixel indices
    tt = spt.nearestMatchingInds(tt, time)
    coi_ext = spt.nearestMatchingInds(coi_ext, freq)
    # Put y-ticks at power-of-two frequencies and label them in Hz
    freq_log = np.unique(spt.nextPow2(freq))
    fTick = 2**freq_log
    yTick = 1 / fTick
    inds = spt.nearestMatchingInds(yTick, period)
    ytl = (1 / period[inds]).astype(int).ravel()
    fig = plt.imshow(W, aspect='auto', cmap=cmap)
    fig.axes.set_yticks(inds)
    fig.axes.set_yticklabels(np.array(ytl))
    plt.colorbar()

    # Five evenly spaced ticks along the time axis
    xTick = np.linspace(0, len(time) - 1, 5).astype(int)
    xtl = np.round(time[xTick] / (time[-1] / 4)) * 0.5
    fig.axes.set_xticks(xTick)
    fig.axes.set_xticklabels(xtl.ravel())
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Overlay the cone of influence as a dashed line
    plt.plot(tt, coi_ext, 'k--')
    return fig
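The frequency-axis labeling above (y-ticks at power-of-two frequencies found by nearest-index matching) can be reproduced without apCode. A minimal, self-contained sketch with synthetic data, assuming a descending frequency axis as in plotWave:

import numpy as np
import matplotlib.pyplot as plt

freq = 2.0 ** np.linspace(6, 0, 64)          # 64 Hz down to 1 Hz, descending
time = np.linspace(0, 10, 500)
W = np.random.randn(freq.size, time.size)    # stand-in for wavelet coefficients

img = plt.imshow(np.log2(np.abs(W)), aspect='auto', cmap='coolwarm')
fTick = 2.0 ** np.arange(0, 7)               # 1, 2, 4, ..., 64 Hz
yTick = [np.argmin(np.abs(freq - f)) for f in fTick]
img.axes.set_yticks(yTick)
img.axes.set_yticklabels(fTick.astype(int))
xTick = np.linspace(0, time.size - 1, 5).astype(int)
img.axes.set_xticks(xTick)
img.axes.set_xticklabels(np.round(time[xTick], 1))
plt.colorbar()
plt.xlabel('Time (sec)')
plt.ylabel('Freq (Hz)')
plt.show()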
pyData['stim']['inds'] = pyData_disk['stim']['inds'][:].ravel().astype(int) - 1
pyData['swim']['startInds'] = pyData_disk['swim']['startInds'][:].astype(
    int) - 1
pyData['swim']['distFromLastStim'] = pyData_disk['swim']['distFromLastStim'][:]
pyData['time'] = pyData_disk['t'][:]
pyData['smooth'] = np.transpose(pyData_disk['smooth']['burst'][:])

pyData['samplingRate'] = int(1 / (pyData['time'][1] - pyData['time'][0]))

data['pathToEphysData'] = os.path.join(pyDir, pyFileName)

#%% Getting indices for stacks in ephys data and stims in optical data (can take up to 20 mins)
tic = time.time()

maxInt = int(20e-3 * pyData['samplingRate'])
print('Getting indices for stacks in ephys data and stims in optical data...')
data['inds']['stim'] = spt.nearestMatchingInds(
    pyData['time'].ravel()[pyData['stim']['inds']], data['time'])
data['inds']['stackInit'] = spt.nearestMatchingInds(data['time'],
                                                    pyData['time'],
                                                    processing='parallel')

print(int(time.time() - tic), 'sec')
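spt.nearestMatchingInds comes from apCode and is not shown here; it is applied to long vectors above, hence the ~20 min warning and the 'parallel' option. When the reference vector is sorted and increasing, as a time base is, an equivalent nearest-index lookup can be sketched with np.searchsorted (an illustration only, not apCode's implementation):

import numpy as np

def nearest_inds_sorted(vals, ref_sorted):
    # Nearest-index lookup against a sorted reference vector via np.searchsorted;
    # same result as brute-force argmin matching, but O(log n) per query.
    idx = np.searchsorted(ref_sorted, vals)
    idx = np.clip(idx, 1, len(ref_sorted) - 1)
    left = ref_sorted[idx - 1]
    right = ref_sorted[idx]
    idx -= (np.abs(vals - left) < np.abs(vals - right)).astype(int)
    return idx

t_ephys = np.arange(0, 60.0, 1 / 6000.0)   # 6 kHz ephys time base
t_img = np.arange(0, 60.0, 1 / 50.0)       # 50 Hz imaging time base
inds_stackInit = nearest_inds_sorted(t_img, t_ephys)
print(inds_stackInit[:5], t_ephys[inds_stackInit[:3]])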

#%% Subsampling ePhys signals to speed up subsequent regressor convolution and also obtaining some useful indices
tic = time.time()
lpf = 100  # Low pass filter value
stimChan = 1

# Subsample (to speed up convolution with the Ca kernel) the smoothed ePhys
# traces and time vector at about 4 times the Nyquist value
data['ephys'] = pyData.copy()
dt_sub = int(pyData['samplingRate'] / (lpf * 4))
data['ephys']['smooth'] = spt.chebFilt(pyData['smooth'],
                                       1 / pyData['samplingRate'], lpf,
                                       btype='low')[::dt_sub]
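spt.chebFilt is also part of apCode. Assuming it is a zero-phase Chebyshev type-I low-pass filter (an assumption, not confirmed by the source), a rough SciPy-based sketch of the filter-then-subsample step might look like:

import numpy as np
from scipy import signal

def cheb_lowpass(x, dt, cutoff_hz, order=4, ripple_db=0.5):
    # Hypothetical stand-in for spt.chebFilt with btype='low':
    # zero-phase Chebyshev type-I low-pass along the first axis.
    b, a = signal.cheby1(order, ripple_db, cutoff_hz, btype='low', fs=1.0 / dt)
    return signal.filtfilt(b, a, x, axis=0)

fs = 6000                                  # ePhys sampling rate (Hz)
t = np.arange(0, 2, 1.0 / fs)
x = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size)
lpf = 100
dt_sub = int(fs / (lpf * 4))               # keep roughly 4x the filter cutoff
x_sub = cheb_lowpass(x, 1.0 / fs, lpf)[::dt_sub]
print(x_sub.shape)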
Example 6
inds_stim = np.delete(inds_stim, inds_del)
inds_camTrig = spt.levelCrossings(bas[ch_camTrig], thr=2)[0]
# Drop trigger detections that follow the previous one by less than half a
# frame period (spurious double crossings)
dInds = np.diff(bas['t'][inds_camTrig])
inds_del = np.where(dInds <= (0.5 / frameRate))[0] + 1
inds_camTrig = np.delete(inds_camTrig, inds_del)
# plt.plot(bas[ch_camTrig])
# plt.plot(inds_camTrig,bas[ch_camTrig][inds_camTrig],'o')
print('# stims = {}, # of camera triggers = {}'.format(len(inds_stim),
                                                       len(inds_camTrig)))

#%%
maxAllowedTimeBetweenStimAndCamTrig = 0.5  # In sec
t_preStim = 1
t_postStim = 20
Fs_bas = 6000
Fs_ca = 50
n_preStim = t_preStim * Fs_bas
n_postStim = t_postStim * Fs_bas
inds_camTrigNearStim = inds_camTrig[spt.nearestMatchingInds(
    inds_stim, inds_camTrig)]
t_stim = bas['t'][inds_stim]
t_camTrigNearStim = bas['t'][inds_camTrigNearStim]
inds_tooFar = np.where(
    np.abs(t_stim -
           t_camTrigNearStim) > maxAllowedTimeBetweenStimAndCamTrig)[0]

# plt.plot(bas['t'],bas[ch_stim])
# plt.plot(bas['t'],- bas[ch_camTrig])
# plt.plot(bas['t'][inds_camTrigNearStim], bas[ch_camTrig][inds_camTrigNearStim],'o')
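spt.levelCrossings is another apCode helper; judging from its use here, its first output holds the indices where the signal rises above the threshold. A minimal NumPy sketch of upward level-crossing detection (illustration only, not the apCode implementation):

import numpy as np

def level_crossings_up(x, thr):
    # Indices i where x crosses `thr` upward between samples i-1 and i;
    # hypothetical stand-in for the first output of spt.levelCrossings.
    x = np.asarray(x)
    return np.where((x[:-1] < thr) & (x[1:] >= thr))[0] + 1

sig = np.array([0, 0, 3, 3, 0, 0, 5, 5, 0])
print(level_crossings_up(sig, thr=2))   # -> [2 6]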
def readPeriStimulusTifImages(tifPaths, basPaths, nBasCh=16, ch_camTrig='patch4',
                              ch_stim='patch3', tifNameStr='', time_preStim=1,
                              time_postStim=10, thr_stim=1.5, thr_camTrig=1,
                              maxAllowedTimeBetweenStimAndCamTrig=0.5, n_jobs=1):
    """
    Given the paths to .tif files stored by ScanImage (Bessel beam image settings) and the full
    paths to the accompanying bas files, returns a dictionary whose values hold peri-stimulus
    image data (Ca activity) in trialized format along with some other pertinent info.

    Parameters
    ----------
    tifPaths: list of strings
        Paths to the .tif files written by ScanImage. In the current setting, each .tif
        file holds nCh*3000 images, where nCh = number of channels.
    basPaths: list of strings
        Full paths to the bas (BehavAndScan) files accompanying the imaging session.
    nBasCh: scalar
        Number of signal channels in the bas file.
    ch_camTrig: string
        Name of the channel in bas corresponding to the camera trigger signal.
    ch_stim: string
        Name of the stimulus signal channel in bas.
    tifNameStr: string
        Only .tif files whose names contain this string will be read.
    time_preStim: scalar
        The length of the pre-stimulus time to include when reading images.
    time_postStim: scalar
        The length of the post-stimulus time.
    thr_stim: scalar
        Threshold to use for detection of stimuli in the stimulus channel of bas.
    thr_camTrig: scalar
        Threshold to use for detection of camera trigger onsets in the camera trigger channel of bas.
    maxAllowedTimeBetweenStimAndCamTrig: scalar
        If a camera trigger is separated in time from the nearest stimulus by more than this
        interval, then that stimulus trial is ignored.
    n_jobs: scalar
        If greater than 1, trial images are read in parallel using dask.
    Returns
    -------
    D: dict
        Dictionary containing the following keys:
        'I': array, (nTrials, nTime, nImageChannels, imageWidth, imageHeight)
            Image hyperstack arranged in conveniently-accessible trialized format.
        'tifInfo': dict
            Dictionary holding useful image metadata. Has following keys:
            'filePaths': list of strings
                Paths to .tif files
            'nImagesInFile': array of ints
                Number of images in each .tif file after accounting for the number of
                image channels
            'nChannelsInFile': array of ints
                Number of image channels in each .tif file
        'inds_stim': array of integers, (nStim,)
            Indices in bas coordinates where stimuli occurred.
        'inds_stim_img': array of integers, (nStim,)
            Indices in image coordinates where stimuli occurred
        'inds_camTrig': array of integers, (nCameraTriggers,)
            Indices in bas coordinates corresponding to the onsets of camera triggers.
        'bas': dict
            BehavAndScan data

    """
    import tifffile as tff
    import numpy as np
    import apCode.FileTools as ft
    import apCode.ephys as ephys
    import apCode.SignalProcessingTools as spt
    import apCode.util as util

    #    import os
    def getImgIndsInTifs(tifInfo):
        nImgsInFile_cum = np.cumsum(tifInfo['nImagesInFile'] *
                                    tifInfo['nChannelsInFile'])
        imgIndsInTifs = []
        for i in range(len(nImgsInFile_cum)):
            if i == 0:
                inds_ = np.arange(0, nImgsInFile_cum[i])
            else:
                inds_ = np.arange(nImgsInFile_cum[i - 1], nImgsInFile_cum[i])
            imgIndsInTifs.append(inds_)
        return imgIndsInTifs

    ### Read relevant metadata from tif files in directory
    print('Reading ScanImage metadata from tif files...')

    tifInfo = ft.scanImageTifInfo(tifPaths)
    nCaImgs = np.sum(tifInfo['nImagesInFile'])

    ### Check for consistency in the number of image channels in all files.
    if len(np.unique(tifInfo['nChannelsInFile'])) > 1:
        print('Different number of image channels across files, check files!')
        return None
    nImgCh = tifInfo['nChannelsInFile'][0]
    print(f'{nCaImgs} {nImgCh}-channel images from all tif files')

    ### Get a list of indices corresponding to images in each of the tif files
    inds_imgsInTifs = getImgIndsInTifs(tifInfo)

    ### Read bas file to get stimulus and camera trigger indices required to align images and behavior
    print(
        'Reading and joining bas files, detecting stimuli and camera triggers...'
    )
    basList = [ephys.importCh(bp, nCh=nBasCh) for bp in basPaths]
    bas = concatenateBas(basList)
    inds_stim = spt.levelCrossings(bas[ch_stim], thr=thr_stim)[0]
    if len(inds_stim) == 0:
        print(
            f'Only {len(inds_stim)} stims detected, check channel specification or threshold'
        )
        return dict(bas=bas)
    inds_camTrig = spt.levelCrossings(bas[ch_camTrig], thr=thr_camTrig)[0]
    if len(inds_camTrig) == 0:
        print(
            f'Only {len(inds_camTrig)} cam trigs detected, check channel specification or threshold'
        )
        return dict(bas=bas)
    dt_vec = np.diff(bas['t'][inds_camTrig])
    dt_ca = np.round(np.mean(dt_vec) * 100) / 100
    print('Ca sampling rate = {}'.format(1 / dt_ca))
    inds_del = np.where(dt_vec <= (0.5 * dt_ca))[0] + 1
    inds_camTrig = np.delete(inds_camTrig, inds_del)

    ### Deal with possible mismatch in number of camera trigger indices and number of images in tif files
    if nCaImgs < len(inds_camTrig):
        inds_camTrig = inds_camTrig[:nCaImgs]
        nCaImgs_extra = 0
    elif nCaImgs > len(inds_camTrig):
        nCaImgs_extra = nCaImgs - len(inds_camTrig)
    else:
        nCaImgs_extra = 0
    print('{} extra Ca2+ images'.format(nCaImgs_extra))
    print('{} stimuli and {} camera triggers'.format(len(inds_stim),
                                                     len(inds_camTrig)))

    ### Indices of ca images closest to stimulus
    inds_stim_img = spt.nearestMatchingInds(inds_stim, inds_camTrig)

    ### Find trials where the nearest cam trigger is farther than the stimulus by a certain amount
    inds_camTrigNearStim = inds_camTrig[inds_stim_img]
    t_stim = bas['t'][inds_stim]
    t_camTrigNearStim = bas['t'][inds_camTrigNearStim]
    inds_tooFar = np.where(
        np.abs(t_stim -
               t_camTrigNearStim) > maxAllowedTimeBetweenStimAndCamTrig)[0]
    inds_ca_all = np.arange(nCaImgs)
    nPreStim = int(time_preStim / dt_ca)
    nPostStim = int(time_postStim / dt_ca)
    print("{} pre-stim points, and {} post-stim points".format(
        nPreStim, nPostStim))
    inds_ca_trl = np.array(
        spt.segmentByEvents(inds_ca_all, inds_stim_img + nCaImgs_extra,
                            nPreStim, nPostStim))
    ### Find trials that are too short to include the pre- or post-stimulus period
    trlLens = np.array([len(trl_) for trl_ in inds_ca_trl])
    inds_tooShort = np.where(trlLens < np.max(trlLens))[0]
    inds_trl_del = np.union1d(inds_tooFar, inds_tooShort)
    inds_trl_keep = np.setdiff1d(np.arange(len(inds_ca_trl)), inds_trl_del)

    ### Exclude the above 2 types of trials from consideration
    if len(inds_trl_del) > 0:
        print('Excluding the trials {}'.format(inds_trl_del))
        inds_ca_trl = inds_ca_trl[inds_trl_keep]

    I = []
    print('Reading trial-related images from tif files...')
    nTrls = len(inds_ca_trl)

    def trlImages(inds_ca_trl, inds_imgsInTifs, nImgCh, tifInfo, trl):
        trl_ = np.arange(trl.min() * nImgCh, (trl.max() + 1) * nImgCh)
        loc = util.locateItemsInSetsOfItems(trl_, inds_imgsInTifs)
        I_ = []
        for subInds, supInd in zip(loc['subInds'], loc['supInds']):
            with tff.TiffFile(tifInfo['filePaths'][supInd]) as tif:
                img = tif.asarray(key=subInds)
            I_.extend(img.reshape(-1, nImgCh, *img.shape[1:]))
        I_ = np.array(I_)
        return I_

    if n_jobs < 2:
        chunkSize = int(nTrls / 5)
        for iTrl, trl in enumerate(inds_ca_trl):
            if np.mod(iTrl, chunkSize) == 0:
                print('Trl # {}/{}'.format(iTrl + 1, nTrls))
            I_ = trlImages(inds_ca_trl, inds_imgsInTifs, nImgCh, tifInfo, trl)
            I.append(I_)
    else:
        print('Processing with dask')
        import dask
        from dask.diagnostics import ProgressBar
        for trl in inds_ca_trl:
            I_ = dask.delayed(trlImages)(inds_ca_trl, inds_imgsInTifs, nImgCh,
                                         tifInfo, trl)
            I.append(I_)
        with ProgressBar():
            I = dask.compute(*I)

    D = dict(I=np.squeeze(np.array(I)), tifInfo=tifInfo, inds_stim=inds_stim,
             inds_stim_img=inds_stim_img, inds_camTrig=inds_camTrig, bas=bas,
             inds_trl_excluded=inds_trl_del)
    return D
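spt.segmentByEvents (apCode) is used above to cut a continuous index vector into peri-event windows; trials near the recording edges come out shorter and are later dropped. A plain-NumPy illustration of that kind of windowing, with the behavior inferred from how the output is used (hypothetical, not the apCode implementation):

import numpy as np

def segment_by_events(x, eventInds, nPre, nPost):
    # For each event index, return the slice of x spanning nPre samples before
    # to nPost samples after the event, clipped at the array boundaries.
    segs = []
    for e in eventInds:
        start = max(e - nPre, 0)
        stop = min(e + nPost, len(x))
        segs.append(np.asarray(x[start:stop]))
    return segs

inds_all = np.arange(1000)            # e.g. frame indices of a Ca2+ movie
events = np.array([5, 300, 990])      # stimulus frames
trials = segment_by_events(inds_all, events, nPre=20, nPost=100)
print([len(trl) for trl in trials])   # edge trials come out shorter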
def dataFrameOfMatchedMtrAndCaTrls_singleFish(bas, ca=None, ch_camTrig='camTrigger',
                                              ch_stim='patch3', ch_switch='patch2',
                                              thr_camTrig=4, Fs_bas=6000, t_pre_bas=0.2,
                                              t_post_bas=1.5, t_pre_ca=1, t_post_ca=10,
                                              n_jobs=20):
    """
    Parameters
    ----------
    bas: dict
        Dictionary resulting from reading of BAS file
    ca: array, (nRois, nSamples)
        nSamples is expected to match the number of Camera triggers
    Returns
    -------
    df: pandas DataFrame
        One row per retained trial, with columns holding the trialized motor data, the
        Ca2+ frame indices (and Ca2+ traces if `ca` is given), trial numbers, stimulus
        amplitude and stimulus location.
    """
    import numpy as np
    from scipy.stats import mode
    import apCode.SignalProcessingTools as spt
    from apCode import util
    import pandas as pd

    def motorFromBas(bas):
        keys = list(bas.keys())
        ind_mtr = util.findStrInList('den', keys)
        if len(ind_mtr) > 0:
            ind_mtr = ind_mtr[-1]
            x = np.squeeze(np.array(bas[keys[ind_mtr]]))
        else:
            if 'ch3' in bas:
                x = np.array([bas['ch3'], bas['ch4']])
            else:
                x = np.array([bas['ch1'], bas['ch2']])
        if x.shape[0] < x.shape[1]:
            x = x.T
        return x

    mtr = motorFromBas(bas)
    stimInds, stimAmps, stimHt = getStimInfo(bas, ch_stim=ch_stim,
                                             ch_camTrig=ch_camTrig,
                                             ch_switch=ch_switch)
    camTrigInds = getCamTrigInds(bas[ch_camTrig], thr=thr_camTrig)

    #    dt_bas = 1/Fs_bas
    dt_ca = np.mean(np.diff(bas['t'][camTrigInds]))
    Fs_ca = int(np.round(1 / dt_ca))
    if ca is None:
        nFrames = len(camTrigInds)
    else:
        nFrames = ca.shape[1]
        d = len(camTrigInds) - ca.shape[1]
        if d > 0:
            print(f'Check threshold, {d} more camera triggers than expected')
        elif d < 0:
            print(f'Check threshold, {-d} fewer camera triggers than expected')
    n_pre_bas = int(np.round(t_pre_bas * Fs_bas))
    n_post_bas = int(np.round(t_post_bas * Fs_bas))
    n_pre_ca = int(np.round(t_pre_ca * Fs_ca))
    n_post_ca = int(np.round(t_post_ca * Fs_ca))

    stimFrames = spt.nearestMatchingInds(stimInds, camTrigInds) - 2
    mtr_trl = spt.segmentByEvents(mtr,
                                  stimInds,
                                  n_pre_bas,
                                  n_post_bas,
                                  n_jobs=n_jobs)
    indsVec_ca = np.arange(nFrames)
    indsVec_ca_trl = spt.segmentByEvents(indsVec_ca,
                                         stimFrames,
                                         n_pre_ca,
                                         n_post_ca,
                                         n_jobs=n_jobs)
    trlNum_actual = np.arange(len(mtr_trl))
    lens_mtr = np.array([len(mtr_) for mtr_ in mtr_trl])
    lens_ca = np.array([len(inds_) for inds_ in indsVec_ca_trl])
    inds_del_mtr = np.where(lens_mtr != mode(lens_mtr)[0])[0]
    inds_del_ca = np.where(lens_ca != mode(lens_ca)[0])[0]
    inds_del_trl = np.union1d(inds_del_mtr, inds_del_ca)
    trlNum_actual = np.delete(trlNum_actual, inds_del_trl)
    mtr_trl = list(np.delete(
        mtr_trl, inds_del_trl,
        axis=0))  # Converting to list so that can later put in dataframe
    indsVec_ca_trl = list(np.delete(indsVec_ca_trl, inds_del_trl, axis=0))
    dic = dict(mtr_trl=mtr_trl, caInds_trl=indsVec_ca_trl)
    if ca is not None:
        if np.ndim(ca) == 1:
            ca = ca[np.newaxis, ...]
        ca_trl = [ca[:, inds_] for inds_ in indsVec_ca_trl]
        dic['ca_trl'] = ca_trl
    trlNum = np.arange(len(mtr_trl))
    dic['trlNum'] = trlNum
    dic['trlNum_actual'] = trlNum_actual
    dic['stimAmp'] = np.delete(stimAmps, inds_del_trl)
    dic['stimLoc'] = np.delete(stimHt, inds_del_trl)
    return pd.DataFrame(dic, columns=dic.keys())
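The trial-matching step above keeps only trials whose motor and Ca2+ segments have the modal length. A small self-contained illustration of that filtering, using np.bincount instead of scipy.stats.mode to sidestep SciPy version differences:

import numpy as np

# Trials of unequal length (e.g. segments clipped at the recording edges)
trials = [np.zeros(120), np.zeros(120), np.zeros(87), np.zeros(120), np.zeros(54)]
lens = np.array([len(trl) for trl in trials])
modal_len = np.bincount(lens).argmax()          # most common trial length
keep = np.where(lens == modal_len)[0]
trials_kept = [trials[i] for i in keep]
print(modal_len, keep, len(trials_kept))        # -> 120 [0 1 3] 3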