Example #1
# imports for this snippet; determine_search_location, threshold_components,
# regression_ipyparallel, parallel_dot_product and load_memmap are helpers
# assumed to be defined in the surrounding module (CaImAn-style CNMF code)
import os
import shutil
import tempfile
import time

import numpy as np
import scipy
import scipy.sparse
from scipy.ndimage import generate_binary_structure, iterate_structure
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.decomposition import NMF
from past.builtins import basestring
from past.utils import old_div


def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3, normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True,
                              se=np.ones((3, 3), dtype=int), ss=np.ones((3, 3), dtype=int), nb=1, method_ls='nnls_L0'):
    """update spatial footprints and background through Basis Pursuit Denoising 

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C - b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile of background activity.
    A_in: np.ndarray
        initial estimate of the spatial footprints. If A_in is boolean it directly
        defines the spatial support of A; otherwise the support is determined
        through determine_search_location

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int
        minimum size of the ellipse axis, in pixels, when expanding the search

    max_size: [optional] int
        maximum size of the ellipse axis, in pixels

    dist: [optional] int
        expansion factor of the ellipse


    sn: [optional] float
        noise associated with each pixel if known

    backend: [optional] str
        'ipyparallel', 'single_thread' or 'SLURM'
        single_thread: no parallelization; can be used with small datasets
        ipyparallel: uses an ipython cluster and sends jobs to its workers
        SLURM: uses the SLURM scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])            

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls: string
        method used for the regression in the basis pursuit denoising:
             'nnls_L0': nonnegative least squares with L0 penalty
             'lasso_lars': lasso lars function from scikit-learn
             'lasso_lars_old': lasso lars from the old implementation, will be deprecated

    normalize_yyt_one: bool
        whether to normalize the C and A matrices so that diag(C*C.T) = 1

    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)
    f: np.ndarray
        same as the input f, unless empty background components were removed

    """
    C = np.array(C)
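    # optionally rescale each temporal trace to unit energy, diag(C*C.T) = 1,
    # moving the scale into the spatial footprints A_in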
    if normalize_yyt_one:
        #        cct=np.diag(C.dot(C.T))
        nr_C = np.shape(C)[0]
        d = scipy.sparse.lil_matrix((nr_C, nr_C))
        d.setdiag(np.sqrt(np.sum(C**2, 1)))
        A_in = A_in * d
        C = old_div(C, np.sqrt(np.sum(C**2, 1)[:, np.newaxis]))

    if expandCore is None:
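        # default: a 3x3 cross (4-connectivity) dilated once, i.e. a 5x5
        # diamond-shaped structuring element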
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    if Y.ndim < 2 and not isinstance(Y, basestring):
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    if C is not None:
        C = np.atleast_2d(C)
        if C.shape[1] == 1:
            raise Exception('Dimension of Matrix C must be neurons x time')

    if f is not None:
        f = np.atleast_2d(f)
        if f.shape[1] == 1:
            raise Exception('Dimension of Matrix f must be background comps x time ')

    if (A_in is None) and (C is None):
        raise Exception('Either A_in or C must be provided')

    if A_in is not None:
        if len(A_in.shape) == 1:
            A_in = np.atleast_2d(A_in).T

        if A_in.shape[0] == 1:
            raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    [d, T] = np.shape(Y)

    if A_in is None:
        A_in = np.ones((d, np.shape(C)[1]), dtype=bool)

    if n_pixels_per_process > d:
        print(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decreasing suitably.')
        n_pixels_per_process = d

    if f is not None:
        nb = f.shape[0]

    if A_in.dtype == bool:
        IND = A_in.copy()
        print("spatial support for each components given by the user")
        if C is None:
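            # no C given: estimate nb background components by NMF on pixels
            # outside every support, regress the movie on f to obtain b, and
            # initialize C from the average fluorescence within each support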
            INDav = old_div(IND.astype('float32'), np.sum(IND, axis=0))
            px = (np.sum(IND, axis=1) > 0)
            model = NMF(n_components=nb, init='random', random_state=0)
            b = model.fit_transform(np.maximum(Y[~px, :], 0))
            f = np.atleast_2d(model.components_.squeeze())
            #f = np.mean(Y[~px,:],axis=0)
            Y_resf = np.dot(Y, f.T)
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
            #b = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)
            C = np.fmax(csr_matrix(INDav.T).dot(Y) - np.outer(INDav.T.dot(b), f), 0)

    else:
        IND = determine_search_location(
            A_in, dims, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore, dview=dview)
        print("found spatial support for each component")
        if C is None:
            raise Exception('You need to provide estimates of C and f')

    print((np.shape(A_in)))

    Cf = np.vstack((C, f))  # create matrix that includes background components
    nr, _ = np.shape(C)       # number of neurons

    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]
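    # ind2_[px] holds the indices of the components allowed to be nonzero at
    # pixel px (its spatial support), followed by the background indices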

    if os.environ.get('SLURM_SUBMIT_DIR') is not None:
        tmpf = os.environ.get('SLURM_SUBMIT_DIR')
        print(('cluster temporary folder:' + tmpf))
        folder = tempfile.mkdtemp(dir=tmpf)
    else:
        folder = tempfile.mkdtemp()

    if dview is None:

        Y_name = Y
        C_name = Cf

    else:

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if type(Y) is np.core.memmap:  # if input file is already memory mapped then find the filename
            Y_name = Y.filename
        # if not create a memory mapped version (necessary for parallelization)
        elif isinstance(Y, basestring) or dview is None:
            Y_name = Y
        else:
            raise Exception('Not implemented consistently')
            # unreachable fallback kept from an earlier version:
            # Y_name = os.path.join(folder, 'Y_temp.npy')
            # np.save(Y_name, Y)
            # Y, _, _, _ = load_memmap(Y_name)

    # create arguments to be passed to the function. Here we are grouping
    # bunch of pixels to be processed by each thread
#    pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
# for i in range(0, np.prod(dims) - n_pixels_per_process + 1,
# n_pixels_per_process)]
    cct = np.diag(C.dot(C.T))
    rank_f = nb
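    # each group bundles n_pixels_per_process pixels with everything the
    # per-pixel regression needs: noise levels, allowed components, temporal
    # energies (cct) and the number of background components (rank_f)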
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i, i + n_pixels_per_process)), method_ls, cct, rank_f])

    if i + n_pixels_per_process < np.prod(dims):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i + n_pixels_per_process, np.prod(dims))), method_ls, cct, rank_f])

    A_ = np.zeros((d, nr + np.size(f, 0)))
    print('Starting Update Spatial Components')

    #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
    if dview is not None:
        parallel_result = dview.map_sync(regression_ipyparallel, pixel_groups)
        dview.results.clear()
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
##
#        Cf_ = [Cf[idx_, :] for idx_ in ind2_]
#
#        #% LARS regression
#        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))
#
#        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
#            if px % 1000 == 0:
#                print px
#            if np.size(c) > 0:
#                _, _, a, _, _ = lars_regression_noise_old(y, np.array(c.T), 1, sn[px]**2 * T)
#                if np.isscalar(a):
#                    A_[px, id2_] = a
#                else:
#                    A_[px, id2_] = a.T
##

    #%
    print('Updated Spatial Components')

    A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method,
                              maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc, se=se, ss=ss)

    print("threshold")
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating {} empty components!!'.format(len(ff)))
        neuron_ff = ff[ff < nr]                      # empty neuron columns
        background_ff = ff[ff >= nr] - nr            # empty background columns
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(neuron_ff), 0)
        f = np.delete(f, list(background_ff), 0)
        nr = nr - len(neuron_ff)
        nb = nb - len(background_ff)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    #import pdb
    # pdb.set_trace()
#    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y, f.T, block_size=1000, dview=dview) - \
            A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    print("Computing A_bas")
    A_bas = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)  # update baseline based on residual
    # A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # baseline based on residual
    b = A_bas

    print(("--- %s seconds ---" % (time.time() - start_time)))

    try:  # clean up
        # remove temporary file created
        print("Remove temporary file created")
        shutil.rmtree(folder)

    except:

        raise Exception("Failed to delete: " + folder)


    # if A_in.dtype == bool:
    #     return A_, b, C, f
    # else:
    #     return A_, b, C
    return A_, b, C, f
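
# A minimal usage sketch (not part of the original module) on synthetic data,
# assuming the helpers above are available. Shapes follow the docstring:
# Y is pixels x time, C is neurons x time, f is background x time.
# normalize_yyt_one is disabled so the boolean A_in keeps its dtype and is
# used directly as the spatial support.
if __name__ == '__main__':
    d1, d2, T, K = 32, 32, 200, 5               # hypothetical movie geometry
    Y = np.random.rand(d1 * d2, T)              # movie, pixels x time
    C = np.random.rand(K, T)                    # temporal components
    f = np.random.rand(1, T)                    # one background component
    A_in = np.random.rand(d1 * d2, K) > 0.9     # boolean spatial support
    sn = np.std(Y, axis=1)                      # per-pixel noise estimate

    A, b, C_new, f_new = update_spatial_components(
        Y, C=C, f=f, A_in=A_in, sn=sn, dims=(d1, d2),
        dview=None, normalize_yyt_one=False)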
Example #2
# imports for this snippet; update_order, constrained_foopsi_parallel and
# parallel_dot_product are helpers assumed to be defined in the surrounding
# module (CaImAn-style CNMF code)
import sys

import numpy as np
import scipy
import scipy.linalg
import scipy.sparse
from scipy.sparse import coo_matrix, spdiags
from past.utils import old_div


def update_temporal_components(Y,
                               A,
                               b,
                               Cin,
                               fin,
                               bl=None,
                               c1=None,
                               g=None,
                               sn=None,
                               nb=1,
                               ITER=2,
                               method_foopsi='constrained_foopsi',
                               block_size=20000,
                               memory_efficient=False,
                               debug=False,
                               dview=None,
                               **kwargs):
    """Update temporal components and background given spatial components using a block coordinate descent approach.

    Parameters
    -----------    

    Y: np.ndarray (2D)
        input data with time in the last axis (d x T)
    A: sparse matrix (csc format)
        matrix of spatial components (d x K)
    b: ndarray (dx1)
        current estimate of background component
    Cin: np.ndarray
        current estimate of temporal components (K x T)   
    fin: np.ndarray
        current estimate of temporal background (vector of length T)
    bl: np.ndarray
       baseline for fluorescence trace for each column in A
    c1: np.ndarray
       initial concentration for each column in A
    g:  np.ndarray       
       discrete time constant for each column in A
    sn: np.ndarray
       noise level for each column in A       
    nb: [optional] int
        Number of background components
    ITER: positive integer
        Maximum number of block coordinate descent loops. 
    method_foopsi: string
        Method of deconvolution of neural activity. constrained_foopsi is the only method supported at the moment.               
    n_processes: int
        number of processes to use for parallel computation. Should be less than the number of processes started with ipcluster.
    backend: 'str'
        single_thread: no parallelization
        ipyparallel: parallelization using an ipyparallel cluster; start the cluster
        first (install ipyparallel, then run ipcluster start -n 6, where 6 is the
        number of processes)
        SLURM: using the SLURM scheduler
    memory_efficient: Bool
        whether or not to optimize for memory usage (longer running times). necessary with very large datasets
    **kwargs: dict
        all parameters passed to constrained_foopsi except bl,c1,g,sn (see documentation). Some useful parameters are      
    p: int
        order of the autoregression model
    method: [optional] string
        solution method for constrained foopsi. Choices are
            'cvx':      using cvxopt and picos (slow especially without the MOSEK solver)
            'cvxpy':    using cvxopt and cvxpy with the ECOS solver (faster, default)

    solvers: list string
            primary and secondary (if problem unfeasible for approx solution) solvers to be used with cvxpy, default is ['ECOS','SCS']

    Note
    --------

    The temporal components are updated in parallel by default by forming a sequence of vertex covers.

    Returns
    --------

    C:   np.ndarray
            matrix of temporal components (K x T)
    f:   np.array
            vector of temporal background (length T) 
    S:   np.ndarray            
            matrix of merged deconvolved activity (spikes) (K x T)
    bl:  float  
            same as input    
    c1:  float
            same as input    
    g:   float
            same as input    
    sn:  float
            same as input 
    YrA: np.ndarray
            matrix of spatial component filtered raw data, after all contributions have been removed.            
            YrA corresponds to the residual trace for each component and is used for faster plotting (K x T)

    """

    if 'p' not in kwargs or kwargs['p'] is None:
        raise Exception("You have to provide a value for p")

    d, T = np.shape(Y)
    nr = np.shape(A)[-1]

    if b is not None:
        if b.shape[0] < b.shape[1]:
            b = b.T
        nb = b.shape[1]

    if bl is None:
        bl = np.repeat(None, nr)

    if c1 is None:
        c1 = np.repeat(None, nr)

    if g is None:
        g = np.repeat(None, nr)

    if sn is None:
        sn = np.repeat(None, nr)

    A = scipy.sparse.hstack((A, coo_matrix(b)))
    S = np.zeros(np.shape(Cin))
    Cin = np.vstack((Cin, fin))
    C = Cin
    nA = np.squeeze(np.array(np.sum(np.square(A.todense()), axis=0)))
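    # nA: squared l2-norm of each column of A (neurons plus background), used
    # below to normalize the projections YA and AA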

    Cin = coo_matrix(Cin)
    #YrA = ((A.T.dot(Y)).T-Cin.T.dot(A.T.dot(A)))
    print('Generating residuals')
    #    YA = (A.T.dot(Y).T)*spdiags(1./nA,0,nr+nb,nr+nb)

    if 'memmap' in str(type(Y)):
        if block_size >= 500:
            print('Forcing single thread for memory issues')
            dview_res = None
        else:
            print(
                'Using dview. If you run into memory issues, set block_size >= 500 to force the serial path'
            )
            dview_res = dview

        YA = parallel_dot_product(
            Y, A, dview=dview_res, block_size=block_size,
            transpose=True) * spdiags(old_div(1., nA), 0, nr + nb, nr + nb)
    else:
        YA = (A.T.dot(Y).T) * spdiags(old_div(1., nA), 0, nr + nb, nr + nb)
    print('Done')

    # print np.allclose(YA,YA1)

    AA = ((A.T.dot(A)) * spdiags(old_div(1., nA), 0, nr + nb, nr + nb)).tocsr()

    YrA = YA - Cin.T.dot(AA)
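    # YrA: residual trace per component, i.e. the normalized projection of the
    # data onto each footprint minus the contribution of the current estimates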
    #YrA = ((A.T.dot(Y)).T-Cin.T.dot(A.T.dot(A)))*spdiags(1./nA,0,nr+1,nr+1)

    Cin = np.array(Cin.todense())
    for iter in range(ITER):
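        # split the components into groups with non-overlapping spatial
        # footprints (a sequence of vertex covers); lo holds the group sizes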
        O, lo = update_order(A.tocsc()[:, :nr])
        P_ = []

        for count, jo_ in enumerate(O):
            jo = np.array(list(jo_))
            #Ytemp = YrA[:,jo.flatten()] + (np.dot(np.diag(nA[jo]),Cin[jo,:])).T
            Ytemp = YrA[:, jo.flatten()] + Cin[jo, :].T
            Ctemp = np.zeros((np.size(jo), T))
            Stemp = np.zeros((np.size(jo), T))
            btemp = np.zeros((np.size(jo), 1))
            sntemp = btemp.copy()
            c1temp = btemp.copy()
            gtemp = np.zeros((np.size(jo), kwargs['p']))
            nT = nA[jo]

            #args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, bl[jo[jj]], c1[jo[jj]], g[jo[jj]], sn[jo[jj]], kwargs) for jj in range(len(jo))]
            args_in = [(np.squeeze(np.array(Ytemp[:, jj])), nT[jj], jj, None,
                        None, None, None, kwargs) for jj in range(len(jo))]
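            # bl, c1, g and sn are passed as None so that constrained_foopsi
            # re-estimates them for every component (cf. the commented line above)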
            #            import pdb
            #            pdb.set_trace()
            if dview is not None:
                #
                if debug:

                    results = dview.map_async(constrained_foopsi_parallel,
                                              args_in)

                    results.get()

                    for outp in results.stdout:

                        print((outp[:-1]))

                        sys.stdout.flush()

                    for outp in results.stderr:

                        print((outp[:-1]))

                        sys.stderr.flush()

                else:

                    results = dview.map_sync(constrained_foopsi_parallel,
                                             args_in)

            else:

                results = list(map(constrained_foopsi_parallel, args_in))

            for chunk in results:

                pars = dict()

                C_, Sp_, Ytemp_, cb_, c1_, sn_, gn_, jj_ = chunk

                Ctemp[jj_, :] = C_[None, :]

                Stemp[jj_, :] = Sp_

                Ytemp[:, jj_] = Ytemp_[:, None]

                btemp[jj_] = cb_

                c1temp[jj_] = c1_

                sntemp[jj_] = sn_

                gtemp[jj_, :] = gn_.T

                bl[jo[jj_]] = cb_

                c1[jo[jj_]] = c1_

                sn[jo[jj_]] = sn_

                g[jo[jj_]] = gn_.T if kwargs['p'] > 0 else []  #gtemp[jj,:]

                pars['b'] = cb_

                pars['c1'] = c1_

                pars['neuron_sn'] = sn_

                pars['gn'] = gtemp[jj_, np.abs(gtemp[jj_, :]) > 0]

                pars['neuron_id'] = jo[jj_]

                P_.append(pars)

            YrA -= (Ctemp - C[jo, :]).T * AA[jo, :]
            #YrA[:,jo] = Ytemp
            C[jo, :] = Ctemp.copy()

            S[jo, :] = Stemp

            #           if (np.sum(lo[:jo])+1)%1 == 0:
            print((str(np.sum(lo[:count + 1])) + ' out of total ' + str(nr) +
                   ' temporal components updated'))

        # update the background temporal components (rows nr..nr+nb-1) by
        # nonnegative projection of their residual traces

        #YrA[:,ii] = YrA[:,ii] + np.atleast_2d(Cin[ii,:]).T
        #cc = np.maximum(YrA[:,ii],0)
        for ii in np.arange(nr, nr + nb):
            cc = np.maximum(YrA[:, ii] + np.atleast_2d(Cin[ii, :]).T, 0)
            YrA -= (cc - np.atleast_2d(Cin[ii, :]).T) * AA[ii, :]
            C[ii, :] = cc.T
        #YrA = YA - C.T.dot(AA)
        #YrA[:,ii] = YrA[:,ii] - np.atleast_2d(C[ii,:]).T

        if dview is not None:
            dview.results.clear()

        if old_div(scipy.linalg.norm(Cin - C, 'fro'),
                   scipy.linalg.norm(C, 'fro')) <= 1e-3:
            # stop if the overall temporal component does not change by much
            print(
                "stopping: overall temporal component not changing significantly"
            )
            break
        else:
            Cin = C

    f = C[nr:, :]
    C = C[:nr, :]
    YrA = np.array(YrA[:, :nr]).T
    P_ = sorted(P_, key=lambda k: k['neuron_id'])

    return C, f, S, bl, c1, sn, g, YrA  #,P_
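
# A minimal usage sketch (not part of the original module) on synthetic data,
# assuming the helpers above are available. A must be a sparse d x K matrix
# and the AR order p is mandatory; it reaches constrained_foopsi via **kwargs.
if __name__ == '__main__':
    d, T, K = 400, 200, 5                       # hypothetical sizes
    Y = np.random.rand(d, T)                    # movie, pixels x time
    A = scipy.sparse.csc_matrix(
        np.random.rand(d, K) * (np.random.rand(d, K) > 0.9))
    b = np.random.rand(d, 1)                    # spatial background
    Cin = np.random.rand(K, T)                  # initial temporal components
    fin = np.random.rand(1, T)                  # temporal background

    C, f, S, bl, c1, sn_est, g, YrA = update_temporal_components(
        Y, A, b, Cin, fin, dview=None, p=2)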
Example #3
def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3, normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True,
                              se=np.ones((3, 3), dtype=int), ss=np.ones((3, 3), dtype=int), nb=1, method_ls='nnls_L0'):

    """update spatial footprints and background through Basis Pursuit Denoising 

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C - b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile of background activity.
    A_in: np.ndarray
        initial estimate of the spatial footprints. If A_in is boolean it directly
        defines the spatial support of A; otherwise the support is determined
        through determine_search_location

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int
        minimum size of the ellipse axis, in pixels, when expanding the search

    max_size: [optional] int
        maximum size of the ellipse axis, in pixels

    dist: [optional] int
        expansion factor of the ellipse


    sn: [optional] float
        noise associated with each pixel if known

    backend: [optional] str
        'ipyparallel', 'single_thread' or 'SLURM'
        single_thread: no parallelization; can be used with small datasets
        ipyparallel: uses an ipython cluster and sends jobs to its workers
        SLURM: uses the SLURM scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])            

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls: string
        method used for the regression in the basis pursuit denoising:
             'nnls_L0': nonnegative least squares with L0 penalty
             'lasso_lars': lasso lars function from scikit-learn
             'lasso_lars_old': lasso lars from the old implementation, will be deprecated

    normalize_yyt_one: bool
        whether to normalize the C and A matrices so that diag(C*C.T) = 1

    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)
    f: np.ndarray
        temporal background components (returned only when A_in is boolean)

    """
    C = np.array(C)
    if normalize_yyt_one:
        #        cct=np.diag(C.dot(C.T))
        nr_C = np.shape(C)[0]
        d = scipy.sparse.lil_matrix((nr_C, nr_C))
        d.setdiag(np.sqrt(np.sum(C**2, 1)))
        A_in = A_in * d
        C = old_div(C, np.sqrt(np.sum(C**2, 1)[:, np.newaxis]))


    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    if Y.ndim < 2 and not isinstance(Y, basestring):
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    if C is not None:
        C = np.atleast_2d(C)
        if C.shape[1] == 1:
            raise Exception('Dimension of Matrix C must be neurons x time')

    if f is not None:
        f = np.atleast_2d(f)
        if f.shape[1] == 1:
            raise Exception('Dimension of Matrix f must be background comps x time ')

    if (A_in is None) and (C is None):
        raise Exception('Either A_in or C must be provided')

    if A_in is not None:
        if len(A_in.shape) == 1:
            A_in = np.atleast_2d(A_in).T

        if A_in.shape[0] == 1:
            raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    [d, T] = np.shape(Y)

    if A_in is None:
        A_in = np.ones((d, np.shape(C)[1]), dtype=bool)

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.')

    if f is not None:
        nb = f.shape[0]

    if A_in.dtype == bool:
        IND = A_in.copy()
        print("spatial support for each components given by the user")
        if C is None:
            INDav = old_div(IND.astype('float32'), np.sum(IND, axis=0))
            px = (np.sum(IND, axis=1) > 0)
            model = NMF(n_components=nb, init='random', random_state=0)
            b = model.fit_transform(np.maximum(Y[~px, :], 0))
            f = np.atleast_2d(model.components_.squeeze())
            #f = np.mean(Y[~px,:],axis=0)
            Y_resf = np.dot(Y, f.T)
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
            #b = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)
            C = np.fmax(csr_matrix(INDav.T).dot(Y) - np.outer(INDav.T.dot(b), f), 0)

    else:
        IND = determine_search_location(
            A_in, dims, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore, dview=dview)
        print("found spatial support for each component")
        if C is None:
            raise Exception('You need to provide estimates of C and f')

    print((np.shape(A_in)))

    Cf = np.vstack((C, f))  # create matrix that includes background components
    nr, _ = np.shape(C)       # number of neurons

    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    if os.environ.get('SLURM_SUBMIT_DIR') is not None:
        tmpf = os.environ.get('SLURM_SUBMIT_DIR')
        print(('cluster temporary folder:' + tmpf))
        folder = tempfile.mkdtemp(dir=tmpf)
    else:
        folder = tempfile.mkdtemp()


    if dview is None:

        Y_name = Y
        C_name = Cf

    else:

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if type(Y) is np.core.memmap:  # if input file is already memory mapped then find the filename
            Y_name = Y.filename
        # if not create a memory mapped version (necessary for parallelization)
        elif isinstance(Y, basestring) or dview is None:
            Y_name = Y
        else:
            raise Exception('Not implemented consistently')
            # unreachable fallback kept from an earlier version:
            # Y_name = os.path.join(folder, 'Y_temp.npy')
            # np.save(Y_name, Y)
            # Y, _, _, _ = load_memmap(Y_name)

    # create arguments to be passed to the function. Here we are grouping
    # bunch of pixels to be processed by each thread
#    pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
# for i in range(0, np.prod(dims) - n_pixels_per_process + 1,
# n_pixels_per_process)]
    cct = np.diag(C.dot(C.T))
    rank_f = nb
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(range(i, i + n_pixels_per_process)), method_ls, cct, rank_f])

    if i + n_pixels_per_process < np.prod(dims):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(range(i + n_pixels_per_process, np.prod(dims))), method_ls, cct, rank_f])

    A_ = np.zeros((d, nr + np.size(f, 0)))
    print('Starting Update Spatial Components')

    #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
    if dview is not None:
        parallel_result = dview.map_sync(regression_ipyparallel, pixel_groups)
        dview.results.clear()
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
##
#        Cf_ = [Cf[idx_, :] for idx_ in ind2_]
#
#        #% LARS regression
#        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))
#        
#        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
#            if px % 1000 == 0:
#                print px
#            if np.size(c) > 0:
#                _, _, a, _, _ = lars_regression_noise_old(y, np.array(c.T), 1, sn[px]**2 * T)
#                if np.isscalar(a):
#                    A_[px, id2_] = a
#                else:
#                    A_[px, id2_] = a.T
##

    #%
    print('Updated Spatial Components')

    A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method, maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc,
                              se=se, ss=ss)

    print("threshold")
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    #import pdb
    # pdb.set_trace()
#    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y, f.T, block_size=5000, dview=dview) - \
            A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    print("Computing A_bas")
    A_bas = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)  # update baseline based on residual
    # A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # baseline based on residual
    b = A_bas

    print(("--- %s seconds ---" % (time.time() - start_time)))

    try:  # clean up
        # remove temporary file created
        print("Remove temporary file created")
        shutil.rmtree(folder)

    except:

        raise Exception("Failed to delete: " + folder)

    if A_in.dtype == bool:

        return A_, b, C, f
    else:
        return A_, b, C
Example #4
def update_temporal_components(Y, A, b, Cin, fin, bl=None, c1=None, g=None, sn=None, nb=1, ITER=2,
                               method_foopsi='constrained_foopsi', memory_efficient=False,
                               debug=False, dview=None, **kwargs):
    """Update temporal components and background given spatial components using a block coordinate descent approach.

    Parameters
    -----------    

    Y: np.ndarray (2D)
        input data with time in the last axis (d x T)
    A: sparse matrix (csc format)
        matrix of spatial components (d x K)
    b: ndarray (dx1)
        current estimate of background component
    Cin: np.ndarray
        current estimate of temporal components (K x T)   
    fin: np.ndarray
        current estimate of temporal background (vector of length T)
    bl: np.ndarray
       baseline for fluorescence trace for each column in A
    c1: np.ndarray
       initial concentration for each column in A
    g:  np.ndarray       
       discrete time constant for each column in A
    sn: np.ndarray
       noise level for each column in A       
    nb: [optional] int
        Number of background components
    ITER: positive integer
        Maximum number of block coordinate descent loops. 
    method_foopsi: string
        Method of deconvolution of neural activity. constrained_foopsi is the only method supported at the moment.               
    n_processes: int
        number of processes to use for parallel computation. Should be less than the number of processes started with ipcluster.
    backend: 'str'
        single_thread: no parallelization
        ipyparallel: parallelization using an ipyparallel cluster; start the cluster
        first (install ipyparallel, then run ipcluster start -n 6, where 6 is the
        number of processes)
        SLURM: using the SLURM scheduler
    memory_efficient: Bool
        whether or not to optimize for memory usage (longer running times). necessary with very large datasets
    **kwargs: dict
        all parameters passed to constrained_foopsi except bl,c1,g,sn (see documentation). Some useful parameters are      
    p: int
        order of the autoregression model
    method: [optional] string
        solution method for constrained foopsi. Choices are
            'cvx':      using cvxopt and picos (slow especially without the MOSEK solver)
            'cvxpy':    using cvxopt and cvxpy with the ECOS solver (faster, default)

    solvers: list string
            primary and secondary (if problem unfeasible for approx solution) solvers to be used with cvxpy, default is ['ECOS','SCS']

    Note
    --------

    The temporal components are updated in parallel by default by forming a sequence of vertex covers.

    Returns
    --------

    C:   np.ndarray
            matrix of temporal components (K x T)
    f:   np.array
            vector of temporal background (length T) 
    S:   np.ndarray            
            matrix of merged deconvolved activity (spikes) (K x T)
    bl:  float  
            same as input    
    c1:  float
            same as input    
    g:   float
            same as input    
    sn:  float
            same as input 
    YrA: np.ndarray
            matrix of spatial component filtered raw data, after all contributions have been removed.            
            YrA corresponds to the residual trace for each component and is used for faster plotting (K x T)

    """

    if 'p' not in kwargs or kwargs['p'] is None:
        raise Exception("You have to provide a value for p")

    d, T = np.shape(Y)
    nr = np.shape(A)[-1]

    if b is not None:
        if b.shape[0]<b.shape[1]:
            b = b.T
        nb = b.shape[1]

    if bl is None:
        bl = np.repeat(None, nr)

    if c1 is None:
        c1 = np.repeat(None, nr)

    if g is None:
        g = np.repeat(None, nr)

    if sn is None:
        sn = np.repeat(None, nr)

    A = scipy.sparse.hstack((A, coo_matrix(b)))
    S = np.zeros(np.shape(Cin))
    Cin = np.vstack((Cin, fin))
    C = Cin
    nA = np.squeeze(np.array(np.sum(np.square(A.todense()), axis=0)))


    Cin = coo_matrix(Cin)
    #YrA = ((A.T.dot(Y)).T-Cin.T.dot(A.T.dot(A)))
    print('Generating residuals')
#    YA = (A.T.dot(Y).T)*spdiags(1./nA,0,nr+nb,nr+nb)


    if 'memmap' in str(type(Y)):
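        # unlike Example #2, this version always computes the product serially
        # (dview=None) with a fixed block size for memory-mapped input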
        YA = parallel_dot_product(Y, A, dview=None, block_size=20000,
                                  transpose=True) * spdiags(old_div(1., nA), 0, nr + nb, nr + nb)
    else:
        YA = (A.T.dot(Y).T) * spdiags(old_div(1., nA), 0, nr + nb, nr + nb)
    print('Done')
    # print np.allclose(YA,YA1)

    AA = ((A.T.dot(A)) * spdiags(old_div(1., nA), 0, nr + nb, nr + nb)).tocsr()

    YrA = YA - Cin.T.dot(AA)
    #YrA = ((A.T.dot(Y)).T-Cin.T.dot(A.T.dot(A)))*spdiags(1./nA,0,nr+1,nr+1)


    Cin = np.array(Cin.todense())
    for iter in range(ITER):
        O, lo = update_order(A.tocsc()[:, :nr])
        P_ = []
        for count, jo_ in enumerate(O):
            jo = np.array(list(jo_))
            #Ytemp = YrA[:,jo.flatten()] + (np.dot(np.diag(nA[jo]),Cin[jo,:])).T
            Ytemp = YrA[:, jo.flatten()] + Cin[jo, :].T
            Ctemp = np.zeros((np.size(jo), T))
            Stemp = np.zeros((np.size(jo), T))
            btemp = np.zeros((np.size(jo), 1))
            sntemp = btemp.copy()
            c1temp = btemp.copy()
            gtemp = np.zeros((np.size(jo), kwargs['p']))
            nT = nA[jo]

#            args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, bl[jo[jj]], c1[jo[jj]], g[jo[jj]], sn[jo[jj]], kwargs) for jj in range(len(jo))]
            args_in = [(np.squeeze(np.array(Ytemp[:, jj])), nT[jj], jj, None,
                        None, None, None, kwargs) for jj in range(len(jo))]
#            import pdb
#            pdb.set_trace()
            if dview is not None:                    
                #
                if debug:                

                    results = dview.map_async(constrained_foopsi_parallel,args_in)  

                    results.get()

                    for outp in results.stdout:   

                        print((outp[:-1]))  

                        sys.stdout.flush()            

                    for outp in results.stderr:   

                        print((outp[:-1]))  

                        sys.stderr.flush()            

                else:

                    results = dview.map_sync(constrained_foopsi_parallel,args_in)

            else:

                results = list(map(constrained_foopsi_parallel,args_in))            


            for chunk in results:

                pars=dict()

                C_,Sp_,Ytemp_,cb_,c1_,sn_,gn_,jj_=chunk                    

                Ctemp[jj_,:] = C_[None,:]

                Stemp[jj_,:] = Sp_               

                Ytemp[:,jj_] = Ytemp_[:,None]            

                btemp[jj_] = cb_

                c1temp[jj_] = c1_

                sntemp[jj_] = sn_   

                gtemp[jj_,:] = gn_.T  

                bl[jo[jj_]] = cb_

                c1[jo[jj_]] = c1_

                sn[jo[jj_]] = sn_

                g[jo[jj_]]  = gn_.T if kwargs['p'] > 0 else [] #gtemp[jj,:]

                pars['b'] = cb_

                pars['c1'] = c1_                 

                pars['neuron_sn'] = sn_

                pars['gn'] = gtemp[jj_,np.abs(gtemp[jj_,:])>0] 

                pars['neuron_id'] = jo[jj_]

                P_.append(pars)

            YrA -= (Ctemp-C[jo,:]).T*AA[jo,:]
            #YrA[:,jo] = Ytemp
            C[jo,:] = Ctemp.copy()            

            S[jo,:] = Stemp

#            if (np.sum(lo[:jo])+1)%1 == 0:
            print((str(np.sum(lo[:count+1])) + ' out of total ' + str(nr) + ' temporal components updated'))

        # update the background temporal components by nonnegative projection


        #YrA[:,ii] = YrA[:,ii] + np.atleast_2d(Cin[ii,:]).T
        #cc = np.maximum(YrA[:,ii],0) 
        for ii in np.arange(nr,nr+nb):       
            cc = np.maximum(YrA[:,ii] + np.atleast_2d(Cin[ii,:]).T,0)
            YrA -= (cc-np.atleast_2d(Cin[ii,:]).T)*AA[ii,:]      
            C[ii,:] = cc.T
        #YrA = YA - C.T.dot(AA)
        #YrA[:,ii] = YrA[:,ii] - np.atleast_2d(C[ii,:]).T                

        if dview is not None:       
            dview.results.clear()   


        if old_div(scipy.linalg.norm(Cin - C,'fro'),scipy.linalg.norm(C,'fro')) <= 1e-3:
            # stop if the overall temporal component does not change by much
            print("stopping: overall temporal component not changing significantly")
            break
        else:
            Cin = C

    f = C[nr:,:]
    C = C[:nr,:]
    YrA = np.array(YrA[:,:nr]).T    
    P_ = sorted(P_, key=lambda k: k['neuron_id']) 


    return C,f,S,bl,c1,sn,g,YrA #,P_