def constrained_foopsi_parallel(arg_in):
    """ necessary for parallel computation of the function  constrained_foopsi
    """  
    
    Ytemp, nT, jj_, bl, c1, g, sn, args = arg_in
    T=np.shape(Ytemp)[0]
    cc_,cb_,c1_,gn_,sn_,sp_ = constrained_foopsi(Ytemp/nT, bl = bl,  c1 = c1, g = g,  sn = sn, **args)
    gd_ = np.max(np.roots(np.hstack((1, -gn_.T))))  # decay time constant for initial concentration
    gd_vec = gd_**range(T)
    

    C_ = cc_[:].T + cb_ + np.dot(c1_,gd_vec)
    Sp_ = sp_[:T].T
    Ytemp_ = Ytemp - np.dot(nT,C_).T
    
    return C_,Sp_,Ytemp_,cb_,c1_,sn_,gn_,jj_
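
# --- Usage sketch (illustrative, not part of the original code) ----------------------
# constrained_foopsi_parallel expects one packed argument tuple per trace so that it can
# be mapped over a worker pool. The helper below is a hypothetical example; the names
# YrA, nA, n_processes and foopsi_args follow the conventions used further down in
# update_temporal_components and are assumptions, not part of the original API.
def _example_run_foopsi_in_parallel(YrA, nA, n_processes=2, **foopsi_args):
    import multiprocessing
    nr = YrA.shape[-1]
    # one (trace, normalization, index, bl, c1, g, sn, kwargs) tuple per component
    arg_list = [(np.squeeze(np.asarray(YrA[:, ii])), nA[ii], ii,
                 None, None, None, None, foopsi_args) for ii in range(nr)]
    pool = multiprocessing.Pool(processes=n_processes)
    try:
        results = pool.map(constrained_foopsi_parallel, arg_list)
    finally:
        pool.close()
        pool.join()
    return results
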
def update_temporal_components(Y,A,b,Cin,fin,ITER=2,method_foopsi='constrained_foopsi',n_processes=1, backend='single_thread', memory_efficient=False, **kwargs):
#def update_temporal_components(Y,A,b,Cin,fin,ITER=2,method_foopsi='constrained_foopsi',deconv_method = 'cvx', g='None',**kwargs):
    """update temporal components and background given spatial components using a block coordinate descent approach
    Inputs:
    Y: np.ndarray (2D)
        input data with time in the last axis (d x T)
    A: sparse matrix (csc format)
        matrix of spatial components (d x K)
    b: np.ndarray
        spatial background (vector of length d)
    Cin: np.ndarray
        current estimate of temporal components (K x T)
    ITER: positive integer
        Maximum number of block coordinate descent loops. Default: 2
    fin: np.ndarray
        current estimate of temporal background (vector of length T)
    method_foopsi: string
        Method of deconvolution of neural activity. 
        Default: constrained_foopsi (constrained deconvolution, the only method supported at the moment)
    deconv_method: string
        Solver for constrained foopsi ('cvx' or 'spgl1', default: 'cvx')
    g:  np.ndarray
        Global time constant (not used)
    **kwargs: dict
        all parameters passed to constrained_foopsi, with defaults:
                               b=None,
                               c1=None,
                               g=None,
                               sn=None,
                               p=2,
                               method='cvx',
                               bas_nonneg=True,
                               noise_range=[0.25, 0.5],
                               noise_method='logmexp',
                               lags=5,
                               resparse=0,
                               fudge_factor=1,
                               verbosity=False
                               
    Outputs:
    C:     np.matrix
            matrix of temporal components (K x T)
    f:     np.array
            vector of temporal background (length T) 
    Y_res: np.ndarray
            matrix with current residual (d x T)
    P_:    list of dictionaries
            one dictionary of parameters per temporal component, with keys:
                'b':           baseline for fluorescence trace
                'c1':          initial concentration
                'gn':          discrete time constant
                'neuron_sn':   noise level
                'neuron_id':   index of component
    Sp:    np.matrix
            matrix of deconvolved neural activity (K x T)
    """


    d, T = np.shape(Y)

    nr = np.shape(A)[-1]
    A = scipy.sparse.hstack((A, coo_matrix(b)))  # append spatial background as an extra column
    Cin = np.vstack((Cin, fin))                  # append temporal background as an extra row
    C = Cin
    #%
    nA = np.squeeze(np.array(np.sum(np.square(A.todense()),axis=0)))
    
    Y=np.matrix(Y)
    C=np.matrix(C)
    Cin=np.matrix(Cin)
    Sp = np.zeros((nr,T))
    YrA = Y.T*A - Cin.T*(A.T*A);


    for it in range(ITER):
        idxs = list(range(nr + 1))  # random.shuffle needs a list (Python 3 compatibility)
        random.shuffle(idxs)
        P_ = []
    #    perm = randperm(nr+1)
        for jj,ii in enumerate(idxs):            
            #ii=jj
            #print ii,jj
            pars=dict(kwargs)
    #        ii = perm(jj);
            if ii<nr:                
                if method_foopsi == 'constrained_foopsi':
                        #print YrA.shape 
                        #print YrA.shape
                        YrA[:,ii] = YrA[:,ii] + nA[ii]*Cin[ii,:].T                  
                        cc,cb,c1,gn,sn,sp = constrained_foopsi(np.squeeze(np.asarray(YrA[:,ii]/nA[ii])), **pars)
                        #print pars
                        pars['gn'] = gn
                        
                        gd = np.max(np.roots(np.hstack((1,-gn.T))));  # decay time constant for initial concentration
                        gd_vec = gd**range(T)
                        
                        C[ii,:] = cc[:].T + cb + c1*gd_vec
                        Sp[ii,:] = sp[:T].T
                        YrA[:,ii] = YrA[:,ii] - np.matrix(nA[ii]*C[ii,:]).T
                        pars['b'] = cb
                        pars['c1'] = c1           
                        pars['neuron_sn'] = sn
                        pars['neuron_id'] = ii
                        P_.append(pars)
                else:
                        raise Exception('undefined method')
                    
                
            else:
                YrA[:,ii] = YrA[:,ii] + nA[ii]*Cin[ii,:].T
                cc = np.maximum(YrA[:,ii]/nA[ii],0)
                C[ii,:] = cc[:].T  # background component: plain nonnegative projection (shape handling here may still need fixing)
                YrA[:,ii] = YrA[:,ii] - nA[ii]*C[ii,:].T
            
            if (jj+1)%10 == 0:
                print(str(jj + 1) + ' out of total ' + str(nr + 1) + ' temporal components updated')
    
    
        #%disp(norm(Fin(1:nr,:) - F,'fro')/norm(F,'fro'));
        if scipy.linalg.norm(Cin - C,'fro')/scipy.linalg.norm(C,'fro') <= 1e-3:
            # stop if the overall temporal component does not change by much
            break
        else:
            Cin = C.copy()  # copy so the next convergence check compares distinct arrays
        
    
    
    Y_res = Y - A*C
    
    f = C[nr:,:]
    C = C[:nr,:]
        
    P_ = sorted(P_, key=lambda k: k['neuron_id']) 
    
    return C,f,Y_res,P_,Sp
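
# --- Usage sketch (illustrative, not part of the original code) ----------------------
# A minimal call on synthetic data, assuming numpy/scipy are imported as in the rest of
# this module and that constrained_foopsi and its solver dependencies are available.
# Shapes follow the docstring: Y is d x T, A is d x K, Cin is K x T, fin has length T.
def _example_update_temporal_components():
    d, K, T = 100, 3, 50
    A_dense = np.abs(np.random.randn(d, K))
    A_sp = scipy.sparse.csc_matrix(A_dense)      # spatial components (d x K)
    b = np.abs(np.random.randn(d, 1))            # spatial background (d x 1)
    Cin = np.abs(np.random.randn(K, T))          # initial temporal components (K x T)
    fin = np.abs(np.random.randn(1, T))          # initial temporal background (1 x T)
    Y = A_dense.dot(Cin) + b.dot(fin) + 0.1 * np.random.randn(d, T)
    C, f, Y_res, P_, Sp = update_temporal_components(Y, A_sp, b, Cin, fin, ITER=2, p=2)
    return C, f, Y_res, P_, Sp
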
def update_temporal_components(Y,A,b,Cin,fin,ITER=1,method='constrained_foopsi',deconv_method = 'cvx', g='None',**kwargs):
    """update temporal components and background given spatial components

    **kwargs: all parameters passed to constrained_foopsi, e.g.:
                               b=None,
                               c1=None,
                               g=None,
                               sn=None,
                               p=2,
                               method='cvx',
                               bas_nonneg=True,
                               noise_range=[0.25, 0.5],
                               noise_method='logmexp',
                               lags=5,
                               resparse=0,
                               fudge_factor=1,
                               verbosity=False
    """


    d,T = np.shape(Y);
    

    nr = np.shape(A)[-1]
    A = scipy.sparse.hstack((A,coo_matrix(b)))
    Cin =  np.vstack((Cin,fin));
    C = Cin;
    #%
    nA = np.squeeze(np.array(np.sum(np.square(A.todense()),axis=0)))
    
    Y=np.matrix(Y)
    C=np.matrix(C)
    Cin=np.matrix(Cin)
    Sp = np.zeros((nr,T))
    YrA = Y.T*A - Cin.T*(A.T*A);


    for it in range(ITER):
        idxs = list(range(nr + 1))  # random.shuffle needs a list (Python 3 compatibility)
        random.shuffle(idxs)
        P_ = []
    #    perm = randperm(nr+1)
        for jj,ii in enumerate(idxs):            
            #ii=jj
            #print ii,jj
            pars=dict(kwargs)
    #        ii = perm(jj);
            if ii<nr:                
                if method == 'constrained_foopsi':
                        #print YrA.shape 
                        #print YrA.shape
                        YrA[:,ii] = YrA[:,ii] + nA[ii]*Cin[ii,:].T                  
                        cc,cb,c1,gn,sn,sp = constrained_foopsi(np.squeeze(np.asarray(YrA[:,ii]/nA[ii])), method = deconv_method, **pars)
                        #print pars
                        pars['gn'] = gn
                        
                        gd = np.max(np.roots(np.hstack((1,-gn.T))));  # decay time constant for initial concentration
                        gd_vec = gd**range(T)
                        
                        C[ii,:] = cc[:].T + cb + c1*gd_vec
                        Sp[ii,:] = sp[:T].T
                        YrA[:,ii] = YrA[:,ii] - np.matrix(nA[ii]*C[ii,:]).T
                        pars['b'] = cb
                        pars['c1'] = c1           
                        pars['neuron_sn'] = sn
                        pars['neuron_id'] = ii
                        P_.append(pars)
                else:
                        raise Exception('undefined method')
                    
                
            else:
                YrA[:,ii] = YrA[:,ii] + nA[ii]*Cin[ii,:].T
                cc = np.maximum(YrA[:,ii]/nA[ii],0)
                C[ii,:] = cc[:].T  # update the background temporal component (this assignment was missing)
                YrA[:,ii] = YrA[:,ii] - nA[ii]*C[ii,:].T
            
            if (jj+1)%10 == 0:
                print(str(jj + 1) + ' out of total ' + str(nr + 1) + ' temporal components updated')
    
    
        #%disp(norm(Fin(1:nr,:) - F,'fro')/norm(F,'fro'));
        if scipy.linalg.norm(Cin - C,'fro')/scipy.linalg.norm(C,'fro') <= 1e-3:
            # stop if the overall temporal component does not change by much
            break
        else:
            Cin = C.copy()  # copy so the next convergence check compares distinct arrays
        
    
    
    Y_res = Y - A*C
    
    f = C[nr:,:]
    C = C[:nr,:]
        
    P_ = sorted(P_, key=lambda k: k['neuron_id']) 
    
    return C,f,Y_res,P_,Sp
Example #4
def merge_components(Y,
                     A,
                     b,
                     C,
                     f,
                     S,
                     sn_pix,
                     temporal_params,
                     spatial_params,
                     thr=0.85,
                     fast_merge=True,
                     mx=1000,
                     bl=None,
                     c1=None,
                     sn=None,
                     g=None):
    """ Merging of spatially overlapping components that have highly correlated temporal activity
    The correlation threshold for merging overlapping components is user specified in thr
     
Parameters
-----------     

Y: np.ndarray
     residual movie after subtracting all found components (Y_res = Y - A*C - b*f) (d x T)
A: sparse matrix
     matrix of spatial components (d x K)
b: np.ndarray
     spatial background (vector of length d)
C: np.ndarray
     matrix of temporal components (K x T)
f:     np.ndarray
     temporal background (vector of length T)     
S:     np.ndarray            
     matrix of deconvolved activity (spikes) (K x T)
sn_pix: ndarray
     noise standard deviation for each pixel
temporal_params: dictionary 
     all the parameters that can be passed to the update_temporal_components function
spatial_params: dictionary 
     all the parameters that can be passed to the update_spatial_components function     
     
thr:   scalar between 0 and 1
     correlation threshold for merging (default 0.85)
mx:    int
     maximum number of merging operations (default 1000)
 
bl:        
     baseline for fluorescence trace for each row in C
c1:        
     initial concentration for each row in C
g:         
     discrete time constant for each row in C
sn:        
     noise level for each row in C

Returns
--------

A:     sparse matrix
        matrix of merged spatial components (d x K)
C:     np.ndarray
        matrix of merged temporal components (K x T)
nr:    int
    number of components after merging
merged_ROIs: list
    index of components that have been merged     
S:     np.ndarray            
        matrix of merged deconvolved activity (spikes) (K x T)
bl: np.ndarray
    baseline of fluorescence trace for each component
c1: np.ndarray
    initial concentration for each component
g:  np.ndarray
    discrete time constant(s) for each component
sn: np.ndarray
    noise level for each component
    """

    #%

    nr = A.shape[1]
    if bl is not None and len(bl) != nr:
        raise Exception(
            "The number of elements of bl must match the number of components")

    if c1 is not None and len(c1) != nr:
        raise Exception(
            "The number of elements of c1 must match the number of components")

    if sn is not None and len(sn) != nr:
        raise Exception(
            "The number of elements of sn must match the number of components")

    if g is not None and len(g) != nr:
        raise Exception(
            "The number of elements of g must match the number of components")

    [d, T] = np.shape(Y)
    #    C_corr = np.corrcoef(C[:nr,:],C[:nr,:])[:nr,:nr];
    C_corr = np.corrcoef(C)
    FF1 = C_corr >= thr
    #find graph of strongly correlated temporal components
    A_corr = A.T * A
    A_corr.setdiag(0)
    FF2 = A_corr > 0  # % find graph of overlapping spatial components
    FF3 = np.logical_and(FF1, FF2.todense())
    FF3 = coo_matrix(FF3)
    c, l = csgraph.connected_components(FF3)  # % extract connected components

    p = temporal_params['p']
    MC = []
    for i in range(c):
        if np.sum(l == i) > 1:
            MC.append((l == i).T)
    MC = np.asarray(MC).T

    if MC.ndim > 1:

        cor = np.zeros((np.shape(MC)[1], 1))

        for i in range(np.size(cor)):
            fm = np.where(MC[:, i])[0]
            for j1 in range(np.size(fm)):
                for j2 in range(j1 + 1, np.size(fm)):
                    cor[i] = cor[i] + C_corr[fm[j1], fm[j2]]

        if not fast_merge:
            Y_res = Y - A.dot(C)

        if np.size(cor) > 1:
            ind = np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]

        nm = min((np.size(ind), mx))  # number of merging operations

        A_merged = lil_matrix((d, nm))
        C_merged = np.zeros((nm, T))
        S_merged = np.zeros((nm, T))
        bl_merged = np.zeros((nm, 1))
        c1_merged = np.zeros((nm, 1))
        sn_merged = np.zeros((nm, 1))
        g_merged = np.zeros((nm, p))

        #        P_merged=[];
        merged_ROIs = []

        for i in range(nm):
            #            P_cycle=dict()
            merged_ROI = np.where(MC[:, ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(C[merged_ROI, :]**2, axis=1))
            #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))
            if fast_merge:
                Acsc = A.tocsc()[:, merged_ROI]
                Acsd = Acsc.toarray()
                Ctmp = C[merged_ROI, :]
                print(merged_ROI.T)
                #aa  =  A.tocsc()[:,merged_ROI].dot(scipy.sparse.diags(nC,0,(len(nC),len(nC)))).sum(axis=1)
                aa = Acsc.dot(scipy.sparse.diags(
                    nC, 0, (len(nC), len(nC)))).sum(axis=1)
                for _ in range(10):  # a few alternating least-squares steps for the rank-1 merge
                    #cc = np.dot(aa.T.dot(A.toarray()[:,merged_ROI]),C[merged_ROI,:])/(aa.T*aa)
                    cc = np.dot(aa.T.dot(Acsd), Ctmp) / (aa.T * aa)
                    #aa = A.tocsc()[:,merged_ROI].dot(C[merged_ROI,:].dot(cc.T))/(cc*cc.T)
                    aa = Acsc.dot(Ctmp.dot(cc.T)) / (cc * cc.T)

#                nC = np.sqrt(np.sum(A.toarray()[:,merged_ROI]**2,axis=0))*np.sqrt(np.sum(C[merged_ROI,:]**2,axis=1))
                nC = np.sqrt(np.sum(Acsd**2, axis=0)) * np.sqrt(
                    np.sum(Ctmp**2, axis=1))
                nA = np.sqrt(np.sum(np.array(aa)**2))
                aa /= nA
                cc *= nA

                indx = np.argmax(nC)

                if g is not None:
                    cc, bm, cm, gm, sm, ss = constrained_foopsi(
                        np.array(cc).squeeze(),
                        g=g[merged_ROI[indx]],
                        **temporal_params)
                else:
                    cc, bm, cm, gm, sm, ss = constrained_foopsi(
                        np.array(cc).squeeze(), g=None, **temporal_params)

                A_merged[:, i] = aa
                C_merged[i, :] = cc
                S_merged[i, :] = ss[:T]
                bl_merged[i] = bm
                c1_merged[i] = cm
                sn_merged[i] = sm
                g_merged[i, :] = gm
            else:
                A_merged[:, i] = lil_matrix((A.tocsc()[:, merged_ROI].dot(
                    scipy.sparse.diags(nC, 0,
                                       (len(nC), len(nC))))).sum(axis=1))
                Y_res = Y_res + A.tocsc()[:, merged_ROI].dot(C[merged_ROI, :])
                aa_1 = scipy.sparse.linalg.spsolve(
                    scipy.sparse.diags(nC, 0, (len(nC), len(nC))),
                    csc_matrix(C[merged_ROI, :]))
                aa_2 = (aa_1).mean(axis=0)
                ff = np.nonzero(A_merged[:, i])[0]
                #            cc,_,_,Ptemp,_ = update_temporal_components(np.asarray(Y_res[ff,:]),A_merged[ff,i],b[ff],aa_2,f,p=p,deconv_method=deconv_method)
                cc, _, _, _, bl__, c1__, sn__, g__, YrA = update_temporal_components(
                    np.asarray(Y_res[ff, :]),
                    A_merged[ff, i],
                    b[ff],
                    aa_2,
                    f,
                    bl=None,
                    c1=None,
                    sn=None,
                    g=None,
                    **temporal_params)
                aa, bb, cc = update_spatial_components(np.asarray(Y_res),
                                                       cc,
                                                       f,
                                                       A_merged[:, i],
                                                       sn=sn_pix,
                                                       **spatial_params)
                A_merged[:, i] = aa.tocsr()
                cc, _, _, ss, bl__, c1__, sn__, g__, YrA = update_temporal_components(
                    Y_res[ff, :],
                    A_merged[ff, i],
                    bb[ff],
                    cc,
                    f,
                    bl=bl__,
                    c1=c1__,
                    sn=sn__,
                    g=g__,
                    **temporal_params)

                C_merged[i, :] = cc
                S_merged[i, :] = ss
                bl_merged[i] = bl__[0]
                c1_merged[i] = c1__[0]
                sn_merged[i] = sn__[0]
                g_merged[i, :] = g__[0]
                if i + 1 < nm:
                    Y_res[ff, :] = Y_res[ff, :] - A_merged[ff, i] * cc

        #%
        neur_id = np.unique(np.hstack(merged_ROIs))
        good_neurons = np.setdiff1d(range(nr), neur_id)

        A = scipy.sparse.hstack((A.tocsc()[:, good_neurons], A_merged.tocsc()))
        C = np.vstack((C[good_neurons, :], C_merged))
        if S is not None:
            S = np.vstack((S[good_neurons, :], S_merged))
        if bl is not None:
            bl = np.hstack((bl[good_neurons], np.array(bl_merged).flatten()))
        if c1 is not None:
            c1 = np.hstack((c1[good_neurons], np.array(c1_merged).flatten()))
        if sn is not None:
            sn = np.hstack((sn[good_neurons], np.array(sn_merged).flatten()))
        if g is not None:
            g = np.vstack((np.vstack(g)[good_neurons], g_merged))

    #    P_new=list(P_[good_neurons].copy())
#        P_new=[P_[pp] for pp in good_neurons]
#
#        for p in P_merged:
#            P_new.append(p)
#
        nr = nr - len(neur_id) + nm

    else:
        print('********** No neurons merged! ***************')
        merged_ROIs = []

    return A, C, nr, merged_ROIs, S, bl, c1, sn, g
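
# --- Usage sketch (illustrative, not part of the original code) ----------------------
# merge_components is typically called after the spatial and temporal updates. Only the
# 'p' entry of temporal_params is read directly here; any other keys are forwarded to
# constrained_foopsi (fast_merge=True) or to the update_* functions (fast_merge=False).
# The inputs A, b, C, f, S and sn_pix are assumed to come from a previous CNMF iteration.
def _example_merge(Y, A, b, C, f, S, sn_pix):
    temporal_params = {'p': 2}   # AR order, plus any other constrained_foopsi parameters
    spatial_params = {}          # only used when fast_merge=False
    A, C, nr, merged_ROIs, S, bl, c1, sn, g = merge_components(
        Y, A, b, C, f, S, sn_pix, temporal_params, spatial_params,
        thr=0.85, fast_merge=True)
    return A, C, nr, merged_ROIs, S, bl, c1, sn, g
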
def merge_components(Y,A,b,C,f,S,sn_pix,temporal_params,spatial_params,thr=0.85,fast_merge=True,mx=50,bl=None,c1=None,sn=None,g=None):
    """ Merging of spatially overlapping components that have highly correlated temporal activity
    The correlation threshold for merging overlapping components is user specified in thr
     
Parameters
-----------     

Y: np.ndarray
     residual movie after subtracting all found components (Y_res = Y - A*C - b*f) (d x T)
A: sparse matrix
     matrix of spatial components (d x K)
b: np.ndarray
     spatial background (vector of length d)
C: np.ndarray
     matrix of temporal components (K x T)
f:     np.ndarray
     temporal background (vector of length T)     
S:     np.ndarray            
     matrix of deconvolved activity (spikes) (K x T)
sn_pix: ndarray
     noise standard deviation for each pixel
temporal_params: dictionary 
     all the parameters that can be passed to the update_temporal_components function
spatial_params: dictionary 
     all the parameters that can be passed to the update_spatial_components function     
     
thr:   scalar between 0 and 1
     correlation threshold for merging (default 0.85)
mx:    int
     maximum number of merging operations (default 50)
 
bl:        
     baseline for fluorescence trace for each row in C
c1:        
     initial concentration for each row in C
g:         
     discrete time constant for each row in C
sn:        
     noise level for each row in C

Returns
--------

A:     sparse matrix
        matrix of merged spatial components (d x K)
C:     np.ndarray
        matrix of merged temporal components (K x T)
nr:    int
    number of components after merging
merged_ROIs: list
    index of components that have been merged     
S:     np.ndarray            
        matrix of merged deconvolved activity (spikes) (K x T)
bl: np.ndarray
    baseline of fluorescence trace for each component
c1: np.ndarray
    initial concentration for each component
g:  np.ndarray
    discrete time constant(s) for each component
sn: np.ndarray
    noise level for each component
    """
    
#%
    
    nr = A.shape[1]
    if bl is not None and len(bl) != nr:
        raise Exception("The number of elements of bl must match the number of components")
    
    if c1 is not None and len(c1) != nr:
        raise Exception("The number of elements of c1 must match the number of components")
    
    if sn is not None and len(sn) != nr:
        raise Exception("The number of elements of bl must match the number of components")
    
    if g is not None and len(g) != nr:
        raise Exception("The number of elements of g must match the number of components")

        
    [d,T] = np.shape(Y)
    C_corr = np.corrcoef(C[:nr,:],C[:nr,:])[:nr,:nr];
    FF1=C_corr>=thr; #find graph of strongly correlated temporal components 
    A_corr=A.T*A
    A_corr.setdiag(0)
    FF2=A_corr>0            # % find graph of overlapping spatial components
    FF3=np.logical_and(FF1,FF2.todense())
    FF3=coo_matrix(FF3)
    c,l=csgraph.connected_components(FF3) # % extract connected components
    
    p=temporal_params['p']
    MC=[];
    for i in range(c):     
        if np.sum(l==i)>1:
            MC.append((l==i).T)
    MC=np.asarray(MC).T
    
    if MC.ndim>1:

        cor = np.zeros((np.shape(MC)[1],1));
        
            
        for i in range(np.size(cor)):
            fm = np.where(MC[:,i])[0]
            for j1 in range(np.size(fm)):        
                for j2 in range(j1+1,np.size(fm)):
                    cor[i] = cor[i] +C_corr[fm[j1],fm[j2]]
        
        if not fast_merge:
            Y_res = Y - A.dot(C)
            
        if np.size(cor) > 1:
            ind=np.argsort(np.squeeze(cor))[::-1]
        else:
            ind = [0]
    
        nm = min((np.size(ind),mx))   # number of merging operations
    
        A_merged = lil_matrix((d,nm));
        C_merged = np.zeros((nm,T));
        S_merged = np.zeros((nm,T));
        bl_merged=np.zeros((nm,1))
        c1_merged=np.zeros((nm,1))
        sn_merged=np.zeros((nm,1))
        g_merged=np.zeros((nm,p))
        
#        P_merged=[];
        merged_ROIs = []
    #%
        for i in range(nm):
#            P_cycle=dict()
            merged_ROI=np.where(MC[:,ind[i]])[0]
            merged_ROIs.append(merged_ROI)
            nC = np.sqrt(np.sum(C[merged_ROI,:]**2,axis=1))
    #        A_merged[:,i] = np.squeeze((A[:,merged_ROI]*spdiags(nC,0,len(nC),len(nC))).sum(axis=1))    
            if fast_merge:
                aa  =  A.tocsc()[:,merged_ROI].dot(scipy.sparse.diags(nC,0,(len(nC),len(nC)))).sum(axis=1)
                for _ in range(10):  # a few alternating least-squares steps for the rank-1 merge
                    cc = np.dot(aa.T.dot(A.toarray()[:,merged_ROI]),C[merged_ROI,:])/(aa.T*aa)
                    aa = A.tocsc()[:,merged_ROI].dot(C[merged_ROI,:].dot(cc.T))/(cc*cc.T)
                
                nC = np.sqrt(np.sum(A.toarray()[:,merged_ROI]**2,axis=0))*np.sqrt(np.sum(C[merged_ROI,:]**2,axis=1))
                indx = np.argmax(nC)
                cc,bm,cm,gm,sm,ss = constrained_foopsi(np.array(cc).squeeze(),g=g[merged_ROI[indx]],**temporal_params)
                A_merged[:,i] = aa; 
                C_merged[i,:] = cc
                S_merged[i,:] = ss[:T]
                bl_merged[i] = bm
                c1_merged[i] = cm
                sn_merged[i] = sm
                g_merged[i,:] = gm 
            else:
                A_merged[:,i] = lil_matrix(( A.tocsc()[:,merged_ROI].dot(scipy.sparse.diags(nC,0,(len(nC),len(nC))))).sum(axis=1))        
                Y_res = Y_res + A.tocsc()[:,merged_ROI].dot(C[merged_ROI,:])                
                aa_1=scipy.sparse.linalg.spsolve(scipy.sparse.diags(nC,0,(len(nC),len(nC))),csc_matrix(C[merged_ROI,:]))
                aa_2=(aa_1).mean(axis=0)                        
                ff = np.nonzero(A_merged[:,i])[0]         
    #            cc,_,_,Ptemp,_ = update_temporal_components(np.asarray(Y_res[ff,:]),A_merged[ff,i],b[ff],aa_2,f,p=p,deconv_method=deconv_method)
                cc,_,_,_,bl__,c1__,sn__,g__,YrA = update_temporal_components(np.asarray(Y_res[ff,:]),A_merged[ff,i],b[ff],aa_2,f,bl=None,c1=None,sn=None,g=None,**temporal_params)                     
                aa,bb,cc = update_spatial_components(np.asarray(Y_res),cc,f,A_merged[:,i],sn=sn_pix,**spatial_params)
                A_merged[:,i] = aa.tocsr();                
                cc,_,_,ss,bl__,c1__,sn__,g__,YrA = update_temporal_components(Y_res[ff,:],A_merged[ff,i],bb[ff],cc,f,bl=bl__,c1=c1__,sn=sn__,g=g__,**temporal_params)                
    #            P_cycle=P_[merged_ROI[0]].copy()
    #            P_cycle['gn']=Ptemp[0]['gn']
    #            P_cycle['b']=Ptemp[0]['b']
    #            P_cycle['c1']=Ptemp[0]['c1']
    #            P_cycle['neuron_sn']=Ptemp[0]['neuron_sn']
    #            P_merged.append(P_cycle)
                C_merged[i,:] = cc
                S_merged[i,:] = ss
                bl_merged[i] = bl__[0]
                c1_merged[i] = c1__[0]
                sn_merged[i] = sn__[0]
                g_merged[i,:] = g__[0]                
                if i+1 < nm:
                    Y_res[ff,:] = Y_res[ff,:] - A_merged[ff,i]*cc
                
        #%
        neur_id = np.unique(np.hstack(merged_ROIs))                
        good_neurons=np.setdiff1d(range(nr),neur_id)    
        
        A = scipy.sparse.hstack((A.tocsc()[:,good_neurons],A_merged.tocsc()))
        C = np.vstack((C[good_neurons,:],C_merged))
        S = np.vstack((S[good_neurons,:],S_merged))
        bl=np.hstack((bl[good_neurons],np.array(bl_merged).flatten()))
        c1=np.hstack((c1[good_neurons],np.array(c1_merged).flatten()))
        sn=np.hstack((sn[good_neurons],np.array(sn_merged).flatten()))
        
        g=np.vstack((np.vstack(g)[good_neurons],g_merged))        
        
    #    P_new=list(P_[good_neurons].copy())
#        P_new=[P_[pp] for pp in good_neurons]
#        
#        for p in P_merged:
#            P_new.append(p)
#       
        nr = nr - len(neur_id) + nm
    
    else:
        print('********** No neurons merged! ***************')        
        merged_ROIs=[];        
        
    return A,C,nr,merged_ROIs,S,bl,c1,sn,g
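
# --- Illustration (not part of the original code) -------------------------------------
# The fast_merge branch above fits a single rank-1 factor aa * cc to the components being
# merged by alternating least squares. The dense-array sketch below shows the same update
# rules in isolation; _rank1_merge_sketch and its argument names are illustrative only.
def _rank1_merge_sketch(A_sub, C_sub, n_iter=10):
    """A_sub: d x m spatial footprints to merge, C_sub: m x T corresponding traces."""
    nC = np.sqrt(np.sum(C_sub ** 2, axis=1))        # scale of each temporal trace
    aa = (A_sub * nC).sum(axis=1, keepdims=True)    # initial spatial factor (d x 1)
    for _ in range(n_iter):
        cc = aa.T.dot(A_sub).dot(C_sub) / aa.T.dot(aa)   # temporal factor (1 x T)
        aa = A_sub.dot(C_sub.dot(cc.T)) / cc.dot(cc.T)   # spatial factor (d x 1)
    return aa, cc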