Example #1
def merge_components(Y,
                     A,
                     b,
                     C,
                     f,
                     S,
                     sn_pix,
                     temporal_params,
                     spatial_params,
                     thr=0.85,
                     fast_merge=True,
                     mx=1000,
                     bl=None,
                     c1=None,
                     sn=None,
                     g=None):
    """ Merging of spatially overlapping components that have highly correlated temporal activity
    The correlation threshold for merging overlapping components is user specified in thr
     
Parameters
-----------     

Y: np.ndarray
     residual movie after subtracting all found components (Y_res = Y - A*C - b*f) (d x T)
A: sparse matrix
     matrix of spatial components (d x K)
b: np.ndarray
     spatial background (vector of length d)
C: np.ndarray
     matrix of temporal components (K x T)
f:     np.ndarray
     temporal background (vector of length T)
S:     np.ndarray
     matrix of deconvolved activity (spikes) (K x T)
sn_pix: ndarray
     noise standard deviation for each pixel (vector of length d)
temporal_params: dictionary
     all the parameters that can be passed to the update_temporal_components function
spatial_params: dictionary
     all the parameters that can be passed to the update_spatial_components function

thr:   scalar between 0 and 1
     correlation threshold for merging (default 0.85)
fast_merge: bool
     if True, merge each group with a fast rank-1 approximation; otherwise re-run the full
     spatial and temporal updates on the merged component (default True)
mx:    int
     maximum number of merging operations (default 1000)

bl:
     baseline for fluorescence trace for each row in C
c1:
     initial concentration for each row in C
g:
     discrete time constant for each row in C
sn:
     noise level for each row in C

Returns
--------

A:     sparse matrix
        matrix of merged spatial components (d x K)
C:     np.ndarray
        matrix of merged temporal components (K x T)
nr:    int
    number of components after merging
merged_ROIs: list
    index of components that have been merged     
S:     np.ndarray            
        matrix of merged deconvolved activity (spikes) (K x T)
bl: float       
    baseline for fluorescence trace
c1: float       
    initial concentration
g:  float       
    discrete time constant
sn: float      
    noise level    
    """

    #%

    nr = A.shape[1]
    if bl is not None and len(bl) != nr:
        raise Exception(
            "The number of elements of bl must match the number of components")

    if c1 is not None and len(c1) != nr:
        raise Exception(
            "The number of elements of c1 must match the number of components")

    if sn is not None and len(sn) != nr:
        raise Exception(
            "The number of elements of bl must match the number of components")

    if g is not None and len(g) != nr:
        raise Exception(
            "The number of elements of g must match the number of components")

    [d, T] = np.shape(Y)
    #    C_corr = np.corrcoef(C[:nr,:],C[:nr,:])[:nr,:nr];
    C_corr = np.corrcoef(C)
    FF1 = C_corr >= thr
    #find graph of strongly correlated temporal components
    A_corr = A.T * A
    A_corr.setdiag(0)
    FF2 = A_corr > 0  # % find graph of overlapping spatial components
    FF3 = np.logical_and(FF1, FF2.todense())
    FF3 = coo_matrix(FF3)
    c, l = csgraph.connected_components(FF3)  # % extract connected components

    p = temporal_params['p']
    MC = []
    for i in range(c):
        if np.sum(l == i) > 1:
            MC.append((l == i).T)
    MC = np.asarray(MC).T

    if MC.ndim > 1:

        cor = np.zeros((np.shape(MC)[1], 1))

        for i in range(np.size(cor)):
            fm = np.where(MC[:, i])[0]
            for j1 in range(np.size(fm)):
                for j2 in range(j1 + 1, np.size(fm)):
                    cor[i] = cor[i] + C_corr[fm[j1], fm[j2]]

        if not fast_merge:
            Y_res = Y - A.dot(C)

        if np.size(cor) > 1:
            ind = np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]

        nm = min((np.size(ind), mx))  # number of merging operations

        A_merged = lil_matrix((d, nm))
        C_merged = np.zeros((nm, T))
        S_merged = np.zeros((nm, T))
        bl_merged = np.zeros((nm, 1))
        c1_merged = np.zeros((nm, 1))
        sn_merged = np.zeros((nm, 1))
        g_merged = np.zeros((nm, p))

        #        P_merged=[];
        merged_ROIs = []

        for i in range(nm):
            #            P_cycle=dict()
            merged_ROI = np.where(MC[:, ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(C[merged_ROI, :]**2, axis=1))
            #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))
            if fast_merge:
                Acsc = A.tocsc()[:, merged_ROI]
                Acsd = Acsc.toarray()
                Ctmp = C[merged_ROI, :]
                print(merged_ROI.T)
                #aa  =  A.tocsc()[:,merged_ROI].dot(scipy.sparse.diags(nC,0,(len(nC),len(nC)))).sum(axis=1)
                aa = Acsc.dot(scipy.sparse.diags(
                    nC, 0, (len(nC), len(nC)))).sum(axis=1)
                for iter in range(10):
                    #cc = np.dot(aa.T.dot(A.toarray()[:,merged_ROI]),C[merged_ROI,:])/(aa.T*aa)
                    cc = np.dot(aa.T.dot(Acsd), Ctmp) / (aa.T * aa)
                    #aa = A.tocsc()[:,merged_ROI].dot(C[merged_ROI,:].dot(cc.T))/(cc*cc.T)
                    aa = Acsc.dot(Ctmp.dot(cc.T)) / (cc * cc.T)

#                nC = np.sqrt(np.sum(A.toarray()[:,merged_ROI]**2,axis=0))*np.sqrt(np.sum(C[merged_ROI,:]**2,axis=1))
                nC = np.sqrt(np.sum(Acsd**2, axis=0)) * np.sqrt(
                    np.sum(Ctmp**2, axis=1))
                nA = np.sqrt(np.sum(np.array(aa)**2))
                aa /= nA
                cc *= nA

                indx = np.argmax(nC)

                if g is not None:
                    cc, bm, cm, gm, sm, ss = constrained_foopsi(
                        np.array(cc).squeeze(),
                        g=g[merged_ROI[indx]],
                        **temporal_params)
                else:
                    cc, bm, cm, gm, sm, ss = constrained_foopsi(
                        np.array(cc).squeeze(), g=None, **temporal_params)

                A_merged[:, i] = aa
                C_merged[i, :] = cc
                S_merged[i, :] = ss[:T]
                bl_merged[i] = bm
                c1_merged[i] = cm
                sn_merged[i] = sm
                g_merged[i, :] = gm
            else:
                A_merged[:, i] = lil_matrix((A.tocsc()[:, merged_ROI].dot(
                    scipy.sparse.diags(nC, 0,
                                       (len(nC), len(nC))))).sum(axis=1))
                Y_res = Y_res + A.tocsc()[:, merged_ROI].dot(C[merged_ROI, :])
                aa_1 = scipy.sparse.linalg.spsolve(
                    scipy.sparse.diags(nC, 0, (len(nC), len(nC))),
                    csc_matrix(C[merged_ROI, :]))
                aa_2 = (aa_1).mean(axis=0)
                ff = np.nonzero(A_merged[:, i])[0]
                #            cc,_,_,Ptemp,_ = update_temporal_components(np.asarray(Y_res[ff,:]),A_merged[ff,i],b[ff],aa_2,f,p=p,deconv_method=deconv_method)
                cc, _, _, _, bl__, c1__, sn__, g__, YrA = update_temporal_components(
                    np.asarray(Y_res[ff, :]),
                    A_merged[ff, i],
                    b[ff],
                    aa_2,
                    f,
                    bl=None,
                    c1=None,
                    sn=None,
                    g=None,
                    **temporal_params)
                aa, bb, cc = update_spatial_components(np.asarray(Y_res),
                                                       cc,
                                                       f,
                                                       A_merged[:, i],
                                                       sn=sn_pix,
                                                       **spatial_params)
                A_merged[:, i] = aa.tocsr()
                cc, _, _, ss, bl__, c1__, sn__, g__, YrA = update_temporal_components(
                    Y_res[ff, :],
                    A_merged[ff, i],
                    bb[ff],
                    cc,
                    f,
                    bl=bl__,
                    c1=c1__,
                    sn=sn__,
                    g=g__,
                    **temporal_params)

                C_merged[i, :] = cc
                S_merged[i, :] = ss
                bl_merged[i] = bl__[0]
                c1_merged[i] = c1__[0]
                sn_merged[i] = sn__[0]
                g_merged[i, :] = g__[0]
                if i + 1 < nm:
                    Y_res[ff, :] = Y_res[ff, :] - A_merged[ff, i] * cc

        #%
        neur_id = np.unique(np.hstack(merged_ROIs))
        good_neurons = np.setdiff1d(range(nr), neur_id)

        A = scipy.sparse.hstack((A.tocsc()[:, good_neurons], A_merged.tocsc()))
        C = np.vstack((C[good_neurons, :], C_merged))
        if S is not None:
            S = np.vstack((S[good_neurons, :], S_merged))
        if bl is not None:
            bl = np.hstack((bl[good_neurons], np.array(bl_merged).flatten()))
        if c1 is not None:
            c1 = np.hstack((c1[good_neurons], np.array(c1_merged).flatten()))
        if sn is not None:
            sn = np.hstack((sn[good_neurons], np.array(sn_merged).flatten()))
        if g is not None:
            g = np.vstack((np.vstack(g)[good_neurons], g_merged))

    #    P_new=list(P_[good_neurons].copy())
#        P_new=[P_[pp] for pp in good_neurons]
#
#        for p in P_merged:
#            P_new.append(p)
#
        nr = nr - len(neur_id) + nm

    else:
        print('********** No neurons merged! ***************')
        merged_ROIs = []

    return A, C, nr, merged_ROIs, S, bl, c1, sn, g
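
A small, self-contained sketch of the bookkeeping at the end of merge_components above: components that did not take part in any merge are selected with np.setdiff1d and the merged ones are appended with scipy.sparse.hstack / np.vstack. The matrices and the "merge" itself below are toy stand-ins, not real CNMF output.

import numpy as np
import scipy.sparse
from scipy.sparse import lil_matrix

d, T, nr = 6, 4, 3                                   # toy sizes: pixels, frames, components
A = scipy.sparse.random(d, nr, density=0.5, format='csc', random_state=0)
C = np.arange(nr * T, dtype=float).reshape(nr, T)

merged_ROIs = [np.array([0, 2])]                     # pretend components 0 and 2 were merged
A_merged = lil_matrix((d, 1))
A_merged[:, 0] = lil_matrix(A[:, [0, 2]].sum(axis=1))
C_merged = C[[0, 2], :].mean(axis=0, keepdims=True)

# Keep the untouched components and append the merged ones, as done above.
neur_id = np.unique(np.hstack(merged_ROIs))
good_neurons = np.setdiff1d(range(nr), neur_id)
A_new = scipy.sparse.hstack((A.tocsc()[:, good_neurons], A_merged.tocsc()))
C_new = np.vstack((C[good_neurons, :], C_merged))
nr_new = nr - len(neur_id) + len(merged_ROIs)
print(A_new.shape, C_new.shape, nr_new)              # (6, 2) (2, 4) 2
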
def mergeROIS(Y_res,A,b,C,f,d1,d2,P_,thr=0.8,mx=50,sn=None,deconv_method='spgl1',min_size=3,max_size=8,dist=3,method_exp = 'ellipse', expandCore = iterate_structure(generate_binary_structure(2,1), 2).astype(int)):
    """
    merging of spatially overlapping components that have highly correlated temporal activity
    % The correlation threshold for merging overlapping components is user specified in thr (default value 0.8)
    % Inputs:
    % Y_res:        residual movie after subtracting all found components
    % A:            matrix of spatial components
    % b:            spatial background
    % C:            matrix of temporal components
    % f:            temporal background
    % P:            parameter struct
    
    % Outputs:
    % A:            matrix of new spatial components
    % C:            matrix of new temporal components
    % nr:           new number of components
    % merged_ROIs:  list of old components that were merged
    
    % Written by:
    % Andrea Giovannucci from implementation of Eftychios A. Pnevmatikakis, Simons Foundation, 2015
    """
    
#%
    
    nr = A.shape[1]
    [d, T] = np.shape(Y_res)
    C_corr = np.corrcoef(C[:nr, :], C[:nr, :])[:nr, :nr]
    FF1 = C_corr >= thr  # find graph of strongly correlated temporal components
    A_corr = A.T * A
    A_corr.setdiag(0)
    FF2 = A_corr > 0  # find graph of overlapping spatial components
    FF3 = np.logical_and(FF1, FF2.todense())
    FF3 = coo_matrix(FF3)
    c, l = csgraph.connected_components(FF3)  # extract connected components

    p = len(P_[0]['gn'])
    MC = []
    for i in range(c):
        if np.sum(l == i) > 1:
            MC.append((l == i).T)
    MC = np.asarray(MC).T
    
    if MC.ndim > 1:
        cor = np.zeros((np.shape(MC)[1], 1))

        for i in range(np.size(cor)):
            fm = np.where(MC[:, i])[0]
            for j1 in range(np.size(fm)):
                for j2 in range(j1 + 1, np.size(fm)):
                    print(j1, j2)
                    cor[i] = cor[i] + C_corr[fm[j1], fm[j2]]
        
        
        Y_res = Y_res + np.dot(b, f)
        if np.size(cor) > 1:
            ind = np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]

        nm = min((np.size(ind), mx))  # number of merging operations

        A_merged = lil_matrix((d, nm))
        C_merged = np.zeros((nm, T))

        P_merged = []
        merged_ROIs = []
    #%
        for i in range(nm):
            P_cycle = dict()
            merged_ROI = np.where(MC[:, ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(C[merged_ROI, :]**2, axis=1))
    #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))
            A_merged[:, i] = lil_matrix((A[:, merged_ROI] * scipy.sparse.diags(nC, 0, (len(nC), len(nC)))).sum(axis=1))

            Y_res = Y_res + A[:, merged_ROI] * C[merged_ROI, :]

            aa_1 = scipy.sparse.linalg.spsolve(scipy.sparse.diags(nC, 0, (len(nC), len(nC))), csc_matrix(C[merged_ROI, :]))
            aa_2 = (aa_1).mean(axis=0)

            ff = np.nonzero(A_merged[:, i])[0]

            cc, _, _, Ptemp, S = update_temporal_components(np.asarray(Y_res[ff, :]), A_merged[ff, i], b[ff], aa_2, f, p=p, deconv_method=deconv_method)

            aa, bb, cc = update_spatial_components(np.asarray(Y_res), cc, f, A_merged[:, i], d1=d1, d2=d2, sn=sn, min_size=min_size, max_size=max_size, dist=dist, method=method_exp, expandCore=expandCore)

            A_merged[:, i] = aa.tocsr()

            cc, _, _, Ptemp, S = update_temporal_components(Y_res[ff, :], A_merged[ff, i], bb[ff], cc, f, p=p, deconv_method=deconv_method)

            P_cycle = P_[merged_ROI[0]].copy()
            P_cycle['gn'] = Ptemp[0]['gn']
            P_cycle['b'] = Ptemp[0]['b']
            P_cycle['c1'] = Ptemp[0]['c1']
            P_cycle['neuron_sn'] = Ptemp[0]['neuron_sn']
            P_merged.append(P_cycle)
            C_merged[i, :] = cc
            if i + 1 < nm:
                Y_res[ff, :] = Y_res[ff, :] - A_merged[ff, i] * cc
                
        #%
        neur_id = np.unique(np.hstack(merged_ROIs))

        good_neurons = np.setdiff1d(range(nr), neur_id)

        A = scipy.sparse.hstack((A[:, good_neurons], A_merged.tocsc()))
        C = np.vstack((C[good_neurons, :], C_merged))

    #    P_new=list(P_[good_neurons].copy())
        P_new = [P_[pp] for pp in good_neurons]

        for p in P_merged:
            P_new.append(p)

        nr = nr - len(neur_id) + nm

    else:
        warnings.warn('No neurons merged!')
        merged_ROIs = []
        P_new = P_
        S = None  # no merging was performed, so there are no deconvolved traces to return

    return A, C, nr, merged_ROIs, P_new, S
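
The grouping logic shared by merge_components and mergeROIS (temporally correlated AND spatially overlapping components, followed by connected components on the resulting graph) can be exercised on toy data. The A and C matrices below are made up and are not part of the library.

import numpy as np
import scipy.sparse
from scipy.sparse import coo_matrix, csgraph

thr = 0.85
A = scipy.sparse.csc_matrix(np.array([[1., 1., 0.],
                                      [1., 1., 0.],
                                      [0., 0., 1.]]))        # 3 pixels x 3 components
C = np.array([[1., 2., 3., 4.],
              [1.1, 2.1, 2.9, 4.2],
              [4., 1., 3., 2.]])                             # 3 components x 4 frames

C_corr = np.corrcoef(C)
FF1 = C_corr >= thr                   # strongly correlated temporal components
A_corr = (A.T * A).toarray()
np.fill_diagonal(A_corr, 0)
FF2 = A_corr > 0                      # overlapping spatial components
FF3 = coo_matrix(np.logical_and(FF1, FF2))
n_groups, labels = csgraph.connected_components(FF3)
print(labels)                         # e.g. [0 0 1]: components 0 and 1 form one merge group
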
Example #3
def merge_components(Y,A,b,C,f,S,sn_pix,temporal_params,spatial_params,thr=0.85,fast_merge=True,mx=50,bl=None,c1=None,sn=None,g=None):
    """ Merging of spatially overlapping components that have highly correlated temporal activity
    The correlation threshold for merging overlapping components is user specified in thr
     
Parameters
-----------     

Y: np.ndarray
     residual movie after subtracting all found components (Y_res = Y - A*C - b*f) (d x T)
A: sparse matrix
     matrix of spatial components (d x K)
b: np.ndarray
     spatial background (vector of length d)
C: np.ndarray
     matrix of temporal components (K x T)
f:     np.ndarray
     temporal background (vector of length T)
S:     np.ndarray
     matrix of deconvolved activity (spikes) (K x T)
sn_pix: ndarray
     noise standard deviation for each pixel (vector of length d)
temporal_params: dictionary
     all the parameters that can be passed to the update_temporal_components function
spatial_params: dictionary
     all the parameters that can be passed to the update_spatial_components function

thr:   scalar between 0 and 1
     correlation threshold for merging (default 0.85)
fast_merge: bool
     if True, merge each group with a fast rank-1 approximation; otherwise re-run the full
     spatial and temporal updates on the merged component (default True)
mx:    int
     maximum number of merging operations (default 50)

bl:
     baseline for fluorescence trace for each row in C
c1:
     initial concentration for each row in C
g:
     discrete time constant for each row in C
sn:
     noise level for each row in C

Returns
--------

A:     sparse matrix
        matrix of merged spatial components (d x K)
C:     np.ndarray
        matrix of merged temporal components (K x T)
nr:    int
    number of components after merging
merged_ROIs: list
    index of components that have been merged     
S:     np.ndarray            
        matrix of merged deconvolved activity (spikes) (K x T)
bl: float       
    baseline for fluorescence trace
c1: float       
    initial concentration
g:  float       
    discrete time constant
sn: float      
    noise level    
    """
    
#%
    
    nr = A.shape[1]
    if bl is not None and len(bl) != nr:
        raise Exception("The number of elements of bl must match the number of components")
    
    if c1 is not None and len(c1) != nr:
        raise Exception("The number of elements of c1 must match the number of components")
    
    if sn is not None and len(sn) != nr:
        raise Exception("The number of elements of bl must match the number of components")
    
    if g is not None and len(g) != nr:
        raise Exception("The number of elements of g must match the number of components")

        
    [d, T] = np.shape(Y)
    C_corr = np.corrcoef(C[:nr, :], C[:nr, :])[:nr, :nr]
    FF1 = C_corr >= thr  # find graph of strongly correlated temporal components
    A_corr = A.T * A
    A_corr.setdiag(0)
    FF2 = A_corr > 0  # find graph of overlapping spatial components
    FF3 = np.logical_and(FF1, FF2.todense())
    FF3 = coo_matrix(FF3)
    c, l = csgraph.connected_components(FF3)  # extract connected components

    p = temporal_params['p']
    MC = []
    for i in range(c):
        if np.sum(l == i) > 1:
            MC.append((l == i).T)
    MC = np.asarray(MC).T
    
    if MC.ndim > 1:

        cor = np.zeros((np.shape(MC)[1], 1))

        for i in range(np.size(cor)):
            fm = np.where(MC[:, i])[0]
            for j1 in range(np.size(fm)):
                for j2 in range(j1 + 1, np.size(fm)):
                    cor[i] = cor[i] + C_corr[fm[j1], fm[j2]]
        
        if not fast_merge:
            Y_res = Y - A.dot(C)
            
        if np.size(cor) > 1:
            ind = np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]

        nm = min((np.size(ind), mx))  # number of merging operations

        A_merged = lil_matrix((d, nm))
        C_merged = np.zeros((nm, T))
        S_merged = np.zeros((nm, T))
        bl_merged = np.zeros((nm, 1))
        c1_merged = np.zeros((nm, 1))
        sn_merged = np.zeros((nm, 1))
        g_merged = np.zeros((nm, p))

#        P_merged=[];
        merged_ROIs = []
    #%
        for i in range(nm):
#            P_cycle=dict()
            merged_ROI = np.where(MC[:, ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(C[merged_ROI, :]**2, axis=1))
    #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))
            if fast_merge:
                aa = A.tocsc()[:, merged_ROI].dot(scipy.sparse.diags(nC, 0, (len(nC), len(nC)))).sum(axis=1)
                for iter in range(10):
                    cc = np.dot(aa.T.dot(A.toarray()[:, merged_ROI]), C[merged_ROI, :]) / (aa.T * aa)
                    aa = A.tocsc()[:, merged_ROI].dot(C[merged_ROI, :].dot(cc.T)) / (cc * cc.T)

                nC = np.sqrt(np.sum(A.toarray()[:, merged_ROI]**2, axis=0)) * np.sqrt(np.sum(C[merged_ROI, :]**2, axis=1))
                indx = np.argmax(nC)
                cc, bm, cm, gm, sm, ss = constrained_foopsi(np.array(cc).squeeze(), g=g[merged_ROI[indx]] if g is not None else None, **temporal_params)
                A_merged[:, i] = aa
                C_merged[i, :] = cc
                S_merged[i, :] = ss[:T]
                bl_merged[i] = bm
                c1_merged[i] = cm
                sn_merged[i] = sm
                g_merged[i, :] = gm
            else:
                A_merged[:, i] = lil_matrix((A.tocsc()[:, merged_ROI].dot(scipy.sparse.diags(nC, 0, (len(nC), len(nC))))).sum(axis=1))
                Y_res = Y_res + A.tocsc()[:, merged_ROI].dot(C[merged_ROI, :])
                aa_1 = scipy.sparse.linalg.spsolve(scipy.sparse.diags(nC, 0, (len(nC), len(nC))), csc_matrix(C[merged_ROI, :]))
                aa_2 = (aa_1).mean(axis=0)
                ff = np.nonzero(A_merged[:, i])[0]
    #            cc,_,_,Ptemp,_ = update_temporal_components(np.asarray(Y_res[ff,:]),A_merged[ff,i],b[ff],aa_2,f,p=p,deconv_method=deconv_method)
                cc, _, _, _, bl__, c1__, sn__, g__, YrA = update_temporal_components(np.asarray(Y_res[ff, :]), A_merged[ff, i], b[ff], aa_2, f, bl=None, c1=None, sn=None, g=None, **temporal_params)
                aa, bb, cc = update_spatial_components(np.asarray(Y_res), cc, f, A_merged[:, i], sn=sn_pix, **spatial_params)
                A_merged[:, i] = aa.tocsr()
                cc, _, _, ss, bl__, c1__, sn__, g__, YrA = update_temporal_components(Y_res[ff, :], A_merged[ff, i], bb[ff], cc, f, bl=bl__, c1=c1__, sn=sn__, g=g__, **temporal_params)
                C_merged[i, :] = cc
                S_merged[i, :] = ss
                bl_merged[i] = bl__[0]
                c1_merged[i] = c1__[0]
                sn_merged[i] = sn__[0]
                g_merged[i, :] = g__[0]
                if i + 1 < nm:
                    Y_res[ff, :] = Y_res[ff, :] - A_merged[ff, i] * cc
                
        #%
        neur_id = np.unique(np.hstack(merged_ROIs))
        good_neurons = np.setdiff1d(range(nr), neur_id)

        A = scipy.sparse.hstack((A.tocsc()[:, good_neurons], A_merged.tocsc()))
        C = np.vstack((C[good_neurons, :], C_merged))
        S = np.vstack((S[good_neurons, :], S_merged))
        bl = np.hstack((bl[good_neurons], np.array(bl_merged).flatten()))
        c1 = np.hstack((c1[good_neurons], np.array(c1_merged).flatten()))
        sn = np.hstack((sn[good_neurons], np.array(sn_merged).flatten()))

        g = np.vstack((np.vstack(g)[good_neurons], g_merged))
        
    #    P_new=list(P_[good_neurons].copy())
#        P_new=[P_[pp] for pp in good_neurons]
#        
#        for p in P_merged:
#            P_new.append(p)
#       
        nr = nr - len(neur_id) + nm
    
    else:
        print('********** No neurons merged! ***************')
        merged_ROIs = []

    return A, C, nr, merged_ROIs, S, bl, c1, sn, g
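
The fast_merge branch above computes a rank-1 approximation of each merged group by alternating between the spatial vector aa and the temporal trace cc. A self-contained sketch of that idea on toy dense data (the names aa/cc mirror the code above; the data is made up, and the merged movie is formed explicitly here only for illustration):

import numpy as np

# Toy merged group: two nearly identical components, d pixels x T frames.
rng = np.random.RandomState(0)
a_true = rng.rand(50)                  # shared spatial footprint (d,)
c_true = rng.rand(200)                 # shared temporal trace (T,)
Y_group = np.outer(a_true, 1.0 * c_true) + np.outer(a_true, 1.05 * c_true)

# Rank-1 alternating least squares, as in the fast_merge loop above.
aa = Y_group.sum(axis=1)               # initial spatial estimate (d,)
for _ in range(10):
    cc = aa.dot(Y_group) / aa.dot(aa)  # update temporal trace given aa
    aa = Y_group.dot(cc) / cc.dot(cc)  # update footprint given cc

# Normalize so the spatial component has unit norm, as done after the loop above.
nA = np.sqrt((aa ** 2).sum())
aa /= nA
cc *= nA
print(np.allclose(np.outer(aa, cc), Y_group))  # True: the rank-1 data is recovered
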
Example #4
    def fit(self, images):
        """
       This method uses the cnmf algorithm to find sources in data.
       
       Parameters
       ----------
       images : mapped np.ndarray of shape (t,x,y) containing the images that vary over time.
    	
       Returns
       --------
       self 
    
       """
        T, d1, d2 = images.shape
        dims = (d1, d2)
        Yr = images.reshape([T, np.prod(dims)], order='F').T
        Y = np.transpose(images, [1, 2, 0])
        print(T, d1, d2)

        options = CNMFSetParms(Y, self.n_processes, p=self.p, gSig=self.gSig, K=self.k,
                               ssub=self.ssub, tsub=self.tsub, p_ssub=self.p_ssub,
                               p_tsub=self.p_tsub, method_init=self.method_init)

        self.options = options

        if self.rf is None:

            Yr, sn, g, psx = preprocess_data(Yr,
                                             dview=self.dview,
                                             **options['preprocess_params'])

            if self.Ain is None:
                if self.alpha_snmf is not None:
                    options['init_params']['alpha_snmf'] = self.alpha_snmf

                self.Ain, self.Cin, self.b_in, self.f_in, center = initialize_components(
                    Y, normalize=True, **options['init_params'])

            if self.Ain.dtype == bool:
                A, b, Cin, fin = update_spatial_components(
                    Yr,
                    self.Cin,
                    self.f_in,
                    self.Ain,
                    sn=sn,
                    dview=self.dview,
                    **options['spatial_params'])
                self.f_in = fin
            else:
                A, b, Cin = update_spatial_components(
                    Yr,
                    self.Cin,
                    self.f_in,
                    self.Ain,
                    sn=sn,
                    dview=self.dview,
                    **options['spatial_params'])

            options['temporal_params'][
                'p'] = 0  # set this to zero for fast updating without deconvolution

            C, f, S, bl, c1, neurons_sn, g, YrA = update_temporal_components(
                Yr,
                A,
                b,
                Cin,
                self.f_in,
                dview=self.dview,
                **options['temporal_params'])

            if self.do_merge:
                A, C, nr, merged_ROIs, S, bl, c1, sn1, g1 = merge_components(
                    Yr,
                    A,
                    b,
                    C,
                    f,
                    S,
                    sn,
                    options['temporal_params'],
                    options['spatial_params'],
                    dview=self.dview,
                    bl=bl,
                    c1=c1,
                    sn=neurons_sn,
                    g=g,
                    thr=self.merge_thresh,
                    mx=50,
                    fast_merge=True)

            print(A.shape)

            A, b, C = update_spatial_components(Yr,
                                                C,
                                                f,
                                                A,
                                                sn=sn,
                                                dview=self.dview,
                                                **options['spatial_params'])
            options['temporal_params'][
                'p'] = self.p  # set it back to original value to perform full deconvolution

            C, f, S, bl, c1, neurons_sn, g1, YrA = update_temporal_components(
                Yr,
                A,
                b,
                C,
                f,
                dview=self.dview,
                bl=None,
                c1=None,
                sn=None,
                g=None,
                **options['temporal_params'])

        else:  # use patches

            if self.stride is None:
                self.stride = np.int(self.rf * 2 * .1)
                print('**** Setting the stride to 10% of 2*rf automatically:' +
                      str(self.stride))

            if type(images) is np.ndarray:
                raise Exception(
                    'You need to provide a memory mapped file as input if you use patches!!'
                )

            if self.only_init:
                options['patch_params']['only_init'] = True

            A, C, YrA, b, f, sn, optional_outputs = run_CNMF_patches(
                images.filename, (d1, d2, T),
                options,
                rf=self.rf,
                stride=self.stride,
                dview=self.dview,
                memory_fact=self.memory_fact,
                gnb=self.gnb)

            options = CNMFSetParms(Y,
                                   self.n_processes,
                                   p=self.p,
                                   gSig=self.gSig,
                                   K=A.shape[-1],
                                   thr=self.merge_thresh)
            pix_proc = np.minimum(
                np.int((d1 * d2) / self.n_processes / (T / 2000.)),
                np.int(
                    (d1 * d2) /
                    self.n_processes))  # regulates the amount of memory used
            options['spatial_params']['n_pixels_per_process'] = pix_proc
            options['temporal_params']['n_pixels_per_process'] = pix_proc
            #
            merged_ROIs = [0]
            while len(merged_ROIs) > 0:
                A, C, nr, merged_ROIs, S, bl, c1, sn, g = merge_components(
                    Yr,
                    A, [],
                    np.array(C), [],
                    np.array(C), [],
                    options['temporal_params'],
                    options['spatial_params'],
                    dview=self.dview,
                    thr=self.merge_thresh,
                    mx=np.Inf)

            C, f, S, bl, c1, neurons_sn, g2, YrA = update_temporal_components(
                Yr,
                A,
                b,
                C,
                f,
                dview=self.dview,
                bl=None,
                c1=None,
                sn=None,
                g=None,
                **options['temporal_params'])


#           idx_components, fitness, erfc ,r_values, num_significant_samples = evaluate_components(Y,C+YrA,A,N=self.N_samples_fitness,robust_std=self.robust_std,thresh_finess=self.fitness_threshold)
#           sure_in_idx= idx_components[np.logical_and(np.array(num_significant_samples)>0 ,np.array(r_values)>=self.corr_threshold)]
#
#           print ('Keeping ' + str(len(sure_in_idx)) + ' components out of ' + str(len(idx_components)))
#
#
#           A=A[:,sure_in_idx]
#           C=C[sure_in_idx,:]
#           YrA=YrA[sure_in_idx]

        self.S = S
        self.A = A
        self.C = C
        self.b = b
        self.f = f
        self.YrA = YrA
        self.sn = sn

        return self
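
The reshape at the top of fit (Yr = images.reshape([T, np.prod(dims)], order='F').T) flattens each frame in Fortran (column-major) order so that Yr is pixels x time, and Y holds the same data as a (d1, d2, T) volume. A small sketch with made-up shapes showing the round trip:

import numpy as np

T, d1, d2 = 5, 4, 3                      # toy movie: 5 frames of 4 x 3 pixels
images = np.arange(T * d1 * d2, dtype=np.float32).reshape(T, d1, d2)
dims = (d1, d2)

# Pixels x time matrix, flattening each frame in Fortran (column-major) order,
# as done at the start of fit().
Yr = images.reshape([T, np.prod(dims)], order='F').T        # shape (d1*d2, T)

# The same data as a (d1, d2, T) volume, as used by CNMFSetParms / initialization.
Y = np.transpose(images, [1, 2, 0])

# Round trip: reshaping Yr back in Fortran order recovers Y exactly.
print(np.array_equal(Yr.reshape(dims + (T,), order='F'), Y))  # True
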
Example #5
def mergeROIS(Y_res,
              A,
              b,
              C,
              f,
              S,
              d1,
              d2,
              P_,
              thr=0.85,
              mx=50,
              sn=None,
              deconv_method='spgl1',
              min_size=3,
              max_size=8,
              dist=3,
              method_exp='ellipse',
              expandCore=iterate_structure(generate_binary_structure(2, 1),
                                           2).astype(int)):
    """
    merging of spatially overlapping components that have highly correlated temporal activity
    The correlation threshold for merging overlapping components is user specified in thr
     Inputs:
     Y_res:        np.ndarray 
            residual movie after subtracting all found components (Y_res = Y - A*C - b*f) (d x T)
     A:     sparse matrix
                matrix of spatial components (d x K)
     b:     np.ndarray
                spatial background (vector of length d)
     C:     np.ndarray
                matrix of temporal components (K x T)
     f:     np.ndarray
                temporal background (vector of length T)
     P_:     struct
                structure with neuron parameters
     S:     np.ndarray            
                matrix of deconvolved activity (spikes) (K x T)
     thr:   scalar between 0 and 1
                correlation threshold for merging (default 0.85)
     mx:    int
                maximum number of merging operations (default 50)
     sn:    nd.array
                noise level for each pixel (vector of length d)
    
    Outputs:
     A:     sparse matrix
                matrix of merged spatial components (d x K)
     C:     np.ndarray
                matrix of merged temporal components (K x T)
     nr:    int
            number of components after merging
     P_:     struct
                structure with new neuron parameters
     S:     np.ndarray            
                matrix of merged deconvolved activity (spikes) (K x T)
    
    % Written by:
    % Andrea Giovannucci from implementation of Eftychios A. Pnevmatikakis, Simons Foundation, 2015
    """

    #%

    nr = A.shape[1]
    [d, T] = np.shape(Y_res)
    C_corr = np.corrcoef(C[:nr, :], C[:nr, :])[:nr, :nr]
    FF1 = C_corr >= thr
    #find graph of strongly correlated temporal components
    A_corr = A.T * A
    A_corr.setdiag(0)
    FF2 = A_corr > 0  # % find graph of overlapping spatial components
    FF3 = np.logical_and(FF1, FF2.todense())
    FF3 = coo_matrix(FF3)
    c, l = csgraph.connected_components(FF3)  # % extract connected components

    p = len(P_[0]['gn'])
    MC = []
    for i in range(c):
        if np.sum(l == i) > 1:
            MC.append((l == i).T)
    MC = np.asarray(MC).T

    if MC.ndim > 1:
        cor = np.zeros((np.shape(MC)[1], 1))

        for i in range(np.size(cor)):
            fm = np.where(MC[:, i])[0]
            for j1 in range(np.size(fm)):
                for j2 in range(j1 + 1, np.size(fm)):
                    print(j1, j2)
                    cor[i] = cor[i] + C_corr[fm[j1], fm[j2]]

        Y_res = Y_res + np.dot(b, f)
        if np.size(cor) > 1:
            ind = np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]

        nm = min((np.size(ind), mx))  # number of merging operations

        A_merged = lil_matrix((d, nm))
        C_merged = np.zeros((nm, T))
        S_merged = np.zeros((nm, T))

        P_merged = []
        merged_ROIs = []
        #%
        for i in range(nm):
            P_cycle = dict()
            merged_ROI = np.where(MC[:, ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(C[merged_ROI, :]**2, axis=1))
            #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))
            A_merged[:, i] = lil_matrix(
                (A[:, merged_ROI] *
                 scipy.sparse.diags(nC, 0, (len(nC), len(nC)))).sum(axis=1))

            Y_res = Y_res + A[:, merged_ROI] * C[merged_ROI, :]

            aa_1 = scipy.sparse.linalg.spsolve(
                scipy.sparse.diags(nC, 0, (len(nC), len(nC))),
                csc_matrix(C[merged_ROI, :]))
            aa_2 = (aa_1).mean(axis=0)

            ff = np.nonzero(A_merged[:, i])[0]

            cc, _, _, Ptemp, _ = update_temporal_components(
                np.asarray(Y_res[ff, :]),
                A_merged[ff, i],
                b[ff],
                aa_2,
                f,
                p=p,
                method=deconv_method)

            aa, bb, cc = update_spatial_components(np.asarray(Y_res),
                                                   cc,
                                                   f,
                                                   A_merged[:, i],
                                                   d1=d1,
                                                   d2=d2,
                                                   sn=sn,
                                                   min_size=min_size,
                                                   max_size=max_size,
                                                   dist=dist,
                                                   method=method_exp,
                                                   expandCore=expandCore)

            A_merged[:, i] = aa.tocsr()

            cc, _, _, Ptemp, ss = update_temporal_components(
                Y_res[ff, :],
                A_merged[ff, i],
                bb[ff],
                cc,
                f,
                p=p,
                method=deconv_method)

            P_cycle = P_[merged_ROI[0]].copy()
            P_cycle['gn'] = Ptemp[0]['gn']
            P_cycle['b'] = Ptemp[0]['b']
            P_cycle['c1'] = Ptemp[0]['c1']
            P_cycle['neuron_sn'] = Ptemp[0]['neuron_sn']
            P_merged.append(P_cycle)
            C_merged[i, :] = cc
            S_merged[i, :] = ss

            if i + 1 < nm:
                Y_res[ff, :] = Y_res[ff, :] - A_merged[ff, i] * cc

        #%
        neur_id = np.unique(np.hstack(merged_ROIs))

        good_neurons = np.setdiff1d(range(nr), neur_id)

        A = scipy.sparse.hstack((A[:, good_neurons], A_merged.tocsc()))
        C = np.vstack((C[good_neurons, :], C_merged))
        S = np.vstack((S[good_neurons, :], S_merged))
        #    P_new=list(P_[good_neurons].copy())
        P_new = [P_[pp] for pp in good_neurons]

        for p in P_merged:
            P_new.append(p)

        nr = nr - len(neur_id) + nm

    else:
        warnings.warn('No neurons merged!')
        merged_ROIs = []
        P_new = P_

    return A, C, nr, merged_ROIs, P_new, S
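
The initial merged footprint above is a weighted sum of the grouped columns of A, with weights nC equal to the L2 energy of each temporal trace; multiplying by scipy.sparse.diags(nC, 0, ...) scales each column before summing. A toy illustration of that scaling (the matrices here are invented):

import numpy as np
import scipy.sparse
from scipy.sparse import csc_matrix

# Two spatial footprints (columns) over 4 pixels and their temporal traces.
A_group = csc_matrix(np.array([[1., 0.],
                               [1., 1.],
                               [0., 1.],
                               [0., 0.]]))
C_group = np.array([[2., 2., 2.],
                    [1., 1., 1.]])

# Weight each column of A_group by the L2 norm of its trace, then sum the columns,
# mirroring A[:, merged_ROI] * scipy.sparse.diags(nC, 0, ...) followed by .sum(axis=1) above.
nC = np.sqrt(np.sum(C_group ** 2, axis=1))
weighted = A_group * scipy.sparse.diags(nC, 0, (len(nC), len(nC)))
a_merged = np.asarray(weighted.sum(axis=1)).ravel()
print(a_merged)   # footprint dominated by the brighter (higher-energy) component
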
Example #6
    def fit(self, images):
        """
        This method uses the cnmf algorithm to find sources in data.

        Parameters
        ----------
        images : np.ndarray
            Array of shape (t,x,y) containing the images that vary over time.

        Returns
        --------
        neurons : np.ndarray
            Array of shape (x,y,n) where n is the neuron number
        temporaldata : np.ndarray
            Array of shape (n,t) where n is the neuron number and t is the frame number
        """
        dims = (images.shape[1], images.shape[2])
        T = images.shape[0]
        Yr = np.transpose(images, list(range(1, len(dims) + 1)) + [0])
        Yr = np.reshape(Yr, (np.prod(dims), T), order='F')
        Y = np.reshape(Yr, dims + (T,), order='F')
        options = CNMFSetParms(Y, p=self.p, gSig=self.gSig, K=self.k, backend=self.backend,
                               thr=self.merge_thresh, n_processes=self.n_processes)

        Cn = local_correlations(Y)
        Yr, sn = preprocess_data(Yr, **options['preprocess_params'])
        Atmp, Ctmp, b_in, f_in, center = initialize_components(Y, **options['init_params'])
        Ain, Cin = Atmp, Ctmp
        A, b, Cin = update_spatial_components(Yr, Cin, f_in, Ain, sn=sn, **options['spatial_params'])
        options['temporal_params']['p'] = 0  # set this to zero for fast updating without deconvolution
        C, f, S, bl, c1, neurons_sn, g, YrA = update_temporal_components(Yr, A, b, Cin, f_in, bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
        A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = merge_components(Yr, A, b, C, f, S, sn, options['temporal_params'], options['spatial_params'], options['merging'], bl=bl, c1=c1, sn=neurons_sn, g=g, mx=50, fast_merge=True)
        A2, b2, C2 = update_spatial_components(Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])
        options['temporal_params']['p'] = self.p  # set it back to original value to perform full deconvolution
        C2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = update_temporal_components(Yr, A2, b2, C2, f, bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
        A_or, temporaldata, srt = order_components(A2, C2)
        neurons = A_or.reshape(dims[1], dims[0], A_or.shape[1])
        return np.transpose(neurons, [1, 0, 2]), temporaldata


  #     images = check_images(images)
  #     chunk_size = chunk_size if chunk_size is not None else images.shape[1:]
  #     blocks = images.toblocks(chunk_size=chunk_size, padding=padding)
  #     sources = asarray(blocks.map_generic(self._get))

  #     # add offsets based on block coordinates
  #     for inds in itertools.product(*[range(d) for d in sources.shape]):
  #         offset = (asarray(inds) * asarray(blocks.blockshape)[1:])
  #         for source in sources[inds]:
  #             source.coordinates += offset
  #             if padding:
  #               leftpad = [blocks.padding[i + 1] if inds[i] != 0 else 0 for i in range(len(inds))]
  #               source.coordinates -= asarray(leftpad)
      
  #     # flatten list and create model
  #     flattened = list(itertools.chain.from_iterable(sources.flatten().tolist()))
  #     return ExtractionModel(many(flattened))

  # def _get(self, block):
  #     """
  #     Perform NMF on a block to identify spatial regions.
  #     """
  #     dims = block.shape[1:]
  #     max_size = prod(dims) / 2 if self.max_size == 'full' else self.max_size

  #     # reshape to t x spatial dimensions
  #     data = block.reshape(block.shape[0], -1)

  #     # build and apply NMF model to block
  #     model = SKNMF(self.k, max_iter=self.max_iter)
  #     model.fit(clip(data, 0, inf))

  #     # reconstruct sources as spatial objects in one array
  #     components = model.components_.reshape((self.k,) + dims)

  #     # convert from basis functions into shape
  #     # by median filtering (optional), applying a percentile threshold,
  #     # finding connected components and removing small objects
  #     combined = []
  #     for component in components:
  #         tmp = component > percentile(component, self.percentile)
  #         labels, num = label(tmp, return_num=True)
  #         if num == 1:
  #           counts = bincount(labels.ravel())
  #           if counts[1] < self.min_size:
  #             continue
  #           else:
  #             regions = labels
  #         else:
  #           regions = remove_small_objects(labels, min_size=self.min_size)
  #         ids = unique(regions)
  #         ids = ids[ids > 0]
  #         for ii in ids:
  #             r = regions == ii
  #             r = median_filter(r, 2)
  #             coords = asarray(where(r)).T
  #             if (size(coords) > 0) and (size(coords) < max_size):
  #                 combined.append(one(coords))

  #     # merge overlapping sources
  #     if self.overlap is not None:

  #         # iterate over source pairs and find a pair to merge
  #         def merge(sources):
  #             for i1, s1 in enumerate(sources):
  #                 for i2, s2 in enumerate(sources[i1+1:]):
  #                     if s1.overlap(s2) > self.overlap:
  #                         return i1, i1 + 1 + i2
  #             return None

  #         # merge pairs until none left to merge
  #         pair = merge(combined)
  #         testing = True
  #         while testing:
  #             if pair is None:
  #                 testing = False
  #             else:
  #                 combined[pair[0]] = combined[pair[0]].merge(combined[pair[1]])
  #                 del combined[pair[1]]
  #                 pair = merge(combined)

  #     return combined
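
The commented-out alternative above merges extracted sources pairwise whenever their spatial overlap exceeds a threshold, repeating until no pair qualifies. A self-contained sketch of that merge loop on sets of pixel coordinates (the sources, the overlap definition, and the threshold below are invented stand-ins, not the library's types):

# Minimal stand-in for the pairwise overlap merging in the commented code above.
def overlap(s1, s2):
    # fraction of the smaller source covered by the intersection (hypothetical definition)
    return len(s1 & s2) / float(min(len(s1), len(s2)))

def find_pair(sources, thr):
    for i1 in range(len(sources)):
        for i2 in range(i1 + 1, len(sources)):
            if overlap(sources[i1], sources[i2]) > thr:
                return i1, i2
    return None

sources = [{(0, 0), (0, 1), (1, 1)}, {(0, 1), (1, 1), (2, 1)}, {(5, 5)}]
pair = find_pair(sources, thr=0.5)
while pair is not None:                      # merge pairs until none left to merge
    i1, i2 = pair
    sources[i1] = sources[i1] | sources[i2]
    del sources[i2]
    pair = find_pair(sources, thr=0.5)
print(sources)                               # the two overlapping sources are merged into one
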