Example #1
    def plot_with_labels_interact(self,
                                  ta,
                                  x=None,
                                  cmap='tab20',
                                  figsize=(20, 10),
                                  marker_size=10,
                                  line_alpha=0.2,
                                  ylim=(-150, 150),
                                  title='Tail angles with GMM labels'):

        import plotly.graph_objs as go
        if isinstance(cmap, str):
            cmap = plt.get_cmap(cmap)  # look up the colormap by name; avoids eval
        labels, features = self.predict(ta)
        if x is None:
            x = np.arange(ta.shape[1])
        y = ta[-1]
        if self.pk_thr_ is not None:
            pks = spt.findPeaks(y, thr=self.pk_thr_, pol=0, thrType='rel')[0]
        else:
            pks = np.arange(len(y))

        line = go.Scatter(x=x,
                          y=y,
                          mode='lines',
                          opacity=line_alpha,
                          line=dict(color='black'),
                          name='ta')
        scatters = []
        scatters.append(line)
        for iLbl, lbl in enumerate(np.unique(labels)):
            # cmap returns RGBA floats in [0, 1]; CSS rgba() expects RGB in [0, 255]
            r, g, b, a = cmap(lbl / self.n_gmm_)
            clr = f'rgba({int(r * 255)}, {int(g * 255)}, {int(b * 255)}, {a})'
            inds = np.where(labels == lbl)[0]
            inds = np.intersect1d(inds, pks)
            scatter = go.Scatter(x=x[inds],
                                 y=y[inds],
                                 mode='markers',
                                 marker=dict(color=clr,
                                             symbol=lbl,
                                             size=marker_size),
                                 name=f'Lbl-{lbl}')
            scatters.append(scatter)
        fig = go.Figure(scatters)
        if ylim is not None:
            ylim = np.array(ylim, dtype=float)  # floats, so min/max updates below aren't truncated
            ylim[0] = np.minimum(ylim[0], y.min())
            ylim[1] = np.maximum(ylim[1], y.max())
            fig.layout.yaxis.range = ylim
        fig.layout.xaxis.range = [x[0], x[-1]]
        fig.update_layout(title=title)
        # fig.show()
        # figName = f'Fig-{util.timestamp()}_trl-{iTrl}.html'
        # fig.write_html(os.path.join(figDir,figName))
        return fig
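A minimal usage sketch for the method above, assuming a fitted SvdGmm-like instance (model) and a tail-angles array (ta); the saving lines mirror the commented-out hints in the method body, with hypothetical names:

import os

fig = model.plot_with_labels_interact(ta, marker_size=12, line_alpha=0.3)
fig.show()
figName = 'tail_angles_gmm.html'  # assumed file name
fig.write_html(os.path.join('figs', figName))  # assumed output directory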
Example #2
    def fit(self, ta):
        """Fit model to tail angles. This includes preprocessing wherein
        SVD-based feature extraction is performed, followed by PCA for
        dimensionality reduction, if specified.
        Parameters
        ----------
        self: object
            Instance of initiated SvdGmm class
        ta: array, (nPointsAlongTail, nTimePoints)
            Tail angles array
        Returns
        -------
        self: object
            Trained SvdGmm model.
        """
        if self.svd is None:
            svd = TruncatedSVD(n_components=self.n_svd_,
                               random_state=self.random_state_).fit(ta.T)
        else:
            svd = self.svd
        V = svd.transform(ta.T)
        dv = np.gradient(V)[0]
        ddv = np.gradient(dv)[0]
        X = np.c_[V, dv, ddv]
        if self.use_envelopes_:
            features = max_min_envelopes(X.T).T
        else:
            features = X  # fall back to raw features when envelopes are disabled
        scaler = StandardScaler(with_mean=self.scaler_withMean_).fit(features)
        features = scaler.transform(features)
        if self.pk_thr_ is not None:
            y = ta[-1]
            pks = spt.findPeaks(y, thr=self.pk_thr_, thrType='rel', pol=0)[0]
            print(f'Peaks are {round(100*len(pks)/len(y), 1)}% of all samples')
            features = features[pks, :]
        if self.pca_percVar_ is not None:
            pca = PCA(n_components=self.pca_percVar_,
                      random_state=self.random_state_).fit(features)
            features = pca.transform(features)
            pca.n_components_ = features.shape[1]
        else:
            pca = None
        print('Fitting GMM...')
        gmm = GMM(n_components=self.n_gmm_,
                  random_state=self.random_state_,
                  covariance_type=self.covariance_type_,
                  **self.gmm_kwargs_)
        gmm = gmm.fit(features)
        self.svd = svd
        self.scaler = scaler
        self.pca = pca
        self.gmm = gmm
        return self
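A sketch of the fit/predict flow, assuming an SvdGmm class whose constructor sets the attributes used above (n_svd_, n_gmm_, pk_thr_, ...); the constructor signature shown is a guess, and predict is referenced in Example #1:

import numpy as np

ta = np.load('tailAngles.npy')  # hypothetical input, shape (nPointsAlongTail, nTimePoints)
model = SvdGmm(n_svd=3, n_gmm=20, pk_thr=5)  # assumed constructor signature
model = model.fit(ta)
labels, features = model.predict(ta)
print(f'{len(np.unique(labels))} GMM labels assigned')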
def getStimInfo(bas, ch_stim: str = 'patch3', ch_switch: str = 'patch2',
                ch_camTrig='camTrigger', minPkDist=5*6000):
    import numpy as np
    import apCode.SignalProcessingTools as spt
    pks, amps = spt.findPeaks(np.array(bas[ch_stim]).flatten(),
                              thr=0.5,
                              pol=1,
                              minPkDist=minPkDist)
    inds_keep = np.where(amps > 0)[0]
    pks = pks[inds_keep]
    amps = amps[inds_keep]
    # Label each stimulus by the switch-channel level at the peak: 'H' where
    # the switch channel is high (> 1), else 'T'
    switchVals = np.array(bas[ch_switch]).flatten()[pks]
    ht = np.array(['T'] * len(pks))
    ht[np.where(switchVals > 1)] = 'H'
    return pks, amps, ht
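A usage sketch, assuming bas is the BehaveAndScan dict mentioned in Example #4's docstring (read by importCh; channel names may differ per rig, and the 'H'/'T' interpretation follows the comment above):

import numpy as np

# bas: dict of channel arrays returned by importCh (loading code not shown here)
pks, amps, ht = getStimInfo(bas, ch_stim='patch3', ch_switch='patch2')
print(f"{len(pks)} stimuli; 'H' count: {np.sum(ht == 'H')}, 'T' count: {np.sum(ht == 'T')}")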
Example #4
def stimInds(bas, thr=1, stimCh: str = 'stim0', minStimDist: int = 5*6000,
             normalize: bool = True):
    """Detect stimulus onset indices in a BehaveAndScan stimulus channel.
    Parameters
    ----------
    bas: dict
        BehaveAndScan file read by "importCh".
    thr: scalar or str
        Threshold for detecting stimuli. If 'auto', estimates a threshold,
        assuming that stimulus polarity is positive (i.e. large positive
        values are stimuli).
    stimCh: str
        Name of the stimulus channel.
    minStimDist: int
        Minimum distance (# of samples) between successive stimuli.
    normalize: bool
        Whether to normalize the stimulus channel before detecting stimuli.
        If True, converts the stim channel to z-score units. Useful when the
        absolute threshold value is unknown.
    Returns
    -------
    pks: array or None
        Indices of detected stimuli, or None if no stimuli were found.
    """
    import numpy as np
    import apCode.SignalProcessingTools as spt
    from apCode.volTools import getGlobalThr

    x = bas[stimCh].copy()
    if normalize:
        x = spt.zscore(x)

    if isinstance(thr, str):
        if thr.lower() == 'auto':
            x_pos = x[np.where(x >= 0)]
            thr = getGlobalThr(x_pos)
    # findPeaks returns (indices, amps); keep the indices
    pks = spt.findPeaks(x, thr=thr, pol=1, minPkDist=minStimDist)[0]
    if len(pks) > 0:
        return pks
    print('No stimuli found!')
    return None
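A sketch of calling stimInds with the automatic threshold branch above (z-score normalization followed by getGlobalThr); bas as in the docstring:

stim_pks = stimInds(bas, thr='auto', stimCh='stim0', normalize=True)
if stim_pks is not None:
    print(f'Found {len(stim_pks)} stimuli')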
saveDir = os.path.join(inputDir, 'proc')
imgName = 'K-means clustering of Alx cells_clstrs unord.tif'
tff.imsave(os.path.join(saveDir, imgName),
           np.transpose(imgStack, [0, 3, 1, 2]))

#%% REGRESSION
import apCode.AnalyzeEphysData as aed

# Chose #2 after visual inspection (Note: run K-means with k-means++
#  init and unordered clusters before running this)
centroid_M = centroids[0, :]

dt = data['time'][2] - data['time'][1]
centroid_M = spt.zscore(spt.chebFilt(centroid_M, dt, 0.01, btype='high'))
thr_Ca = volt.getGlobalThr(centroid_M)
pks = spt.findPeaks(centroid_M, thr=thr_Ca, minPkDist=30)

plt.figure(figsize=(16, 6))
plt.style.use('dark_background')
plt.subplot(131)
plt.plot(data['time'], centroid_M)
plt.plot(data['time'][pks[0]], centroid_M[pks[0]], 'ro')
plt.xlim(100, 500)
plt.xlabel('Time (s)')
plt.ylabel('dF/F')
plt.title('Some stim-elicited responses in chosen centroid'
          '\n Shown peaks used to average responses')
centroid_M_seg = aed.SegmentDataByEvents(centroid_M, pks[0], 20, 50, axis=0)
trlLens = np.array([len(trl) for trl in centroid_M_seg])
shortTrls = np.where(trlLens < scipy.stats.mode(trlLens)[0])[0]
centroid_M_seg = np.delete(centroid_M_seg, shortTrls, axis=0)
# In[112]:


importlib.reload(aed)
pre = aed.import10ch(os.path.join(epDir, preFile))
post = aed.import10ch(os.path.join(epDir, postFile))
print(pre.keys())
print(time.ctime())


# In[155]:


#%% Detect and check stimulus indices
pre['stimInds'] = spt.findPeaks(spt.zscore(pre[stimCh]), thr=3)[0] - 2
post['stimInds'] = spt.findPeaks(spt.zscore(post[stimCh]), thr=3)[0] - 2

xmin = np.max((pre['t'][0], post['t'][0]))
xmax = np.min((pre['t'][-1], post['t'][-1]))
plt.style.use(('seaborn-dark', 'seaborn-colorblind', 'seaborn-poster'))
plt.subplot(2, 1, 1)
plt.plot(pre['t'], pre[stimCh])
plt.plot(pre['t'][pre['stimInds']],
         pre[stimCh][pre['stimInds']],
         'o',
         markersize=15)
plt.xlim(xmin, xmax)
plt.title('Pre-Ablation')

plt.subplot(2, 1, 2)
plt.plot(post['t'], post[stimCh])
plt.plot(post['t'][post['stimInds']],
         post[stimCh][post['stimInds']],
         'o',
         markersize=15)
plt.xlim(xmin, xmax)
Example #7
def expand_on_bends(df_trl,
                    Fs=500,
                    tPre_ms=100,
                    bendThr=10,
                    minLat_ms=5,
                    maxGap_ms=100):
    """Takes dataframe where each row contains single trial information and
    expands such that each row contains single bend information
    Parameters
    ----------
    df_trl: pandas dataframe, (nTrlsInTotal, nVariables)
    Fs: int
        Sampling frequency when collecting data(images)
    nPre_ms: scalar

    """
    import apCode.SignalProcessingTools as spt
    minPkDist = int((10e-3) * Fs)
    nPre = tPre_ms * 1e-3 * Fs
    minLat = minLat_ms * 1e-3 * Fs
    maxGap = maxGap_ms * 1e-3 * Fs
    df_bend = []
    for iTrl in np.unique(df_trl.trlIdx_glob):
        df_now = df_trl.loc[df_trl.trlIdx_glob == iTrl]
        y = df_now.iloc[0]['tailAngles'][-1]
        y = spt.chebFilt(y, 1 / Fs, (5, 60), btype='bandpass')
        pks = spt.findPeaks(y,
                            thr=bendThr,
                            thrType='rel',
                            pol=0,
                            minPkDist=minPkDist)[0]
        if len(pks) > 3:
            dpks = np.diff(pks)
            tooSoon = np.where(pks < (nPre + minLat))[0]
            tooSparse = np.where(dpks > maxGap)[0] + 1
            inds_del = np.union1d(tooSoon, tooSparse)
            pks = np.delete(pks, inds_del, axis=0)
        if len(pks) > 3:
            nBends = len(pks)
            bendIdx = np.arange(nBends)
            bendSampleIdxInTrl = pks
            bendAmp = y[pks]
            bendAmp_abs = np.abs(bendAmp)
            bendAmp_rel = np.insert(np.abs(np.diff(bendAmp)), 0,
                                    np.abs(bendAmp[0]))
            bendInt_ms = np.gradient(pks) * (1 / Fs) * 1000
            onset_ms = (pks[0] - nPre + 1) * (1 / Fs) * 1000
        else:
            nBends = 0
            bendIdx, bendAmp, bendAmp_abs, bendAmp_rel, bendInt_ms =\
                [np.nan for _ in range(5)]
            bendSampleIdxInTrl, onset_ms = [np.nan for _ in range(2)]
        dic = dict(trlIdx_glob=iTrl,
                   nBends=nBends,
                   bendIdx=bendIdx,
                   bendSampleIdxInTrl=bendSampleIdxInTrl,
                   bendAmp=bendAmp,
                   bendAmp_abs=bendAmp_abs,
                   bendAmp_rel=bendAmp_rel,
                   bendInt_ms=bendInt_ms,
                   onset_ms=onset_ms)
        # When there are no bends, all dict values are scalars and need an index
        df_now = pd.DataFrame(dic, index=[0]) if nBends == 0 else pd.DataFrame(dic)
        df_bend.append(df_now)
    df_bend = pd.concat(df_bend, ignore_index=True)
    return pd.merge(df_trl, df_bend, on='trlIdx_glob')
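A minimal sketch of applying expand_on_bends, assuming df_trl is a trial-wise dataframe with the 'trlIdx_glob' and 'tailAngles' columns that the loop above requires (construction not shown in these snippets):

df_bend = expand_on_bends(df_trl, Fs=500, tPre_ms=100, bendThr=10)
# Each row is now a single bend; trials with too few usable peaks get nBends == 0
print(df_bend[['trlIdx_glob', 'nBends', 'bendAmp', 'onset_ms']].head())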
dt = 1 / Fs
preStimPts = preStimPer * Fs
postStimPts = postStimPer * Fs

Example #10
def estimateCaDecayKinetics(time,
                            signals,
                            p0=None,
                            thr=2,
                            preTime=10,
                            postTime=40):
    """
    Given a time vector and Ca signal matrix of shape = (C,T), where
        C = # of cells, and T = # of time points (must match length of time
        vector), returns output of shape = (nSamples, 2), where the 1st and
        2nd columns contain the fast and slow decay tau estimates after
        fitting Ca2+ signals with  double exponential
    Parameters:
    time - Time vector of length T
    signals - Ca signals array of shape (nSamples,T)
    p0 - Array-like, (tau_fast, tau_slow, wt_fast), where tau_fast is the
        fast decay time constant (in sec), tau_slow is the slow decay
        constant, and wt_fast is the weight of the fast exponential (<1)
        for fitting the signal as a weighted sum of the fast and slow
        exponential. Default is None, in which case fitting optimization
        begins without initial estimate
    thr - Threshold for peak detection in Ca signals, in units of zscore
    preTime - Pre-peak time length of the Ca signals to include for segmentation
    postTime - Post-peak "           "          "               "
    Avinash Pujala, JRC, 2017

    """
    import numpy as np
    from scipy.optimize import curve_fit as cf
    import apCode.SignalProcessingTools as spt
    import apCode.AnalyzeEphysData as aed

    def doubleExp(time, tau1, tau2, wt1):
        wt2 = 1 - wt1
        time = time - time[0]
        e = wt1 * np.exp(-time / tau1) + wt2 * np.exp(-time / tau2)
        return e

    def listToArray(x):
        lens = [len(item) for item in x]
        lenOfLens = len(lens)
        lens = lens[np.min((lenOfLens - 1, 2))]
        a = np.zeros((len(x), lens))
        delInds = []
        for itemNum, item in enumerate(x):
            if len(item) == lens:
                a[itemNum, :] = item
            else:
                delInds.append(itemNum)
        a = np.delete(a, delInds, axis=0)
        return a, delInds

    if np.ndim(signals) == 1:
        signals = np.reshape(signals, (1, len(signals)))
    dt = time[2] - time[1]
    pts_post = np.round(postTime / dt).astype(int)
    pts_pre = np.round(preTime / dt).astype(int)
    x_norm = spt.zscore(signals, axis=1)
    x_seg, params, x_seg_fit = [], [], []
    nSamples = np.shape(signals)[0]
    excludedSamples = np.zeros((nSamples, 1))
    for nSample in np.arange(nSamples):
        inds_pk = spt.findPeaks(x_norm[nSample, :], thr=thr, ampType='rel')[0]
        if len(inds_pk) == 0:
            print('Peak detection failed for sample #', nSample,
                  '. Try lowering threshold')
            excludedSamples[nSample] = 1
        else:
            blah = aed.SegmentDataByEvents(signals[nSample, :],
                                           inds_pk,
                                           pts_pre,
                                           pts_post,
                                           axis=0)
            blah = listToArray(blah)[0]
            blah = np.mean(blah, axis=0)
            x_seg.append(blah)
            ind_max = np.where(blah == np.max(blah))[0][0]
            y = spt.standardize(blah[ind_max:])
            t = np.arange(len(y)) * dt
            if p0 is None:
                p0 = [10, 20, 0.5]  # generic initial guess: (tau_fast, tau_slow, wt_fast)
            popt, pcov = cf(doubleExp, t, y, p0=p0, bounds=(0, 20))
            if popt[0] > popt[1]:
                popt[0:2] = popt[1::-1]  # swap taus so the fast constant comes first
                popt[-1] = 1 - popt[-1]
            params.append(popt)
            foo = doubleExp(t, popt[0], popt[1], popt[2])
            x_seg_fit.append(foo)
    excludedSamples = np.where(excludedSamples)[0]
    includedSamples = np.setdiff1d(np.arange(nSamples), excludedSamples)
    x_seg, delInds = listToArray(x_seg)
    params = np.delete(np.array(params), delInds, axis=0)
    delInds = includedSamples[delInds]
    if len(delInds) > 0:
        print(
            'Sample #', delInds,
            'excluded for short segment length. Consider decreasing pre-peak time length'
        )
    excludedSamples = np.union1d(delInds, excludedSamples)

    x_seg = spt.standardize(np.array(x_seg), axis=1)
    x_seg_fit = np.array(listToArray(x_seg_fit)[0])
    out = {
        'raw': x_seg,
        'fit': x_seg_fit,
        'params': np.array(params),
        'excludedSamples': excludedSamples
    }
    return out
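A usage sketch with synthetic data, for illustration only; real inputs would be a time vector and a (nSamples, T) Ca2+ signal array, and peak detection depends on spt.findPeaks:

import numpy as np

t = np.arange(0, 200, 0.1)  # hypothetical 10 Hz time vector, in seconds
trace = np.zeros_like(t)
mask = t >= 50
# A synthetic transient: weighted sum of fast and slow exponential decays
trace[mask] = 0.6 * np.exp(-(t[mask] - 50) / 2) + 0.4 * np.exp(-(t[mask] - 50) / 15)
signals = np.tile(trace, (3, 1)) + 0.02 * np.random.randn(3, len(t))
out = estimateCaDecayKinetics(t, signals, thr=2, preTime=10, postTime=40)
print(out['params'])  # columns: tau_fast, tau_slow, wt_fast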