Example no. 1
def fft_psd_multithreading(args):
    """helper function to parallelize get_noise_fft

    Parameters:
    -----------
    Y: ndarray
        input movie (n_pixels x Time), can be also memory mapped file

    sn_s: ndarray (memory mapped)
        file where to store the results of computation.

    i: int
        pixel index start

    num_pixels: int
        number of pixel to select starting from i

    **kwargs: dict
        arguments to be passed to get_noise_fft

    """
    (Y, i, num_pixels, kwargs) = args
    if isinstance(Y, str):
        # a string argument is interpreted as the path of a memory-mapped movie
        Y, _, _ = load_memmap(Y)

    idxs = range(i, i + num_pixels)
    res, psx = get_noise_fft(Y[idxs], **kwargs)

    return (idxs, res, psx)
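
The helper above only processes one chunk; a driver has to split the movie and reassemble the results. Below is a minimal sketch of such a driver, assuming fft_psd_multithreading, load_memmap, and get_noise_fft are importable from the same module; the function name run_noise_fft_parallel and the chunk/pool sizes are illustrative, not part of the original code. With a large movie one would pass the memory-mapped file name as Y instead of the in-memory array, which the helper's string branch supports.

# hypothetical driver: split the pixels into chunks and map the helper over a pool
import multiprocessing

import numpy as np


def run_noise_fft_parallel(Y, num_pixels=128, n_processes=4, **kwargs):
    # one (Y, i, num_pixels, kwargs) tuple per chunk; the last chunk is shortened
    # so that range(i, i + num_pixels) never runs past the end of the movie
    args = [(Y, i, min(num_pixels, Y.shape[0] - i), kwargs)
            for i in range(0, Y.shape[0], num_pixels)]

    pool = multiprocessing.Pool(processes=n_processes)
    try:
        results = pool.map(fft_psd_multithreading, args)
    finally:
        pool.close()
        pool.join()

    # reassemble the per-chunk outputs into one noise vector
    sn = np.zeros(Y.shape[0])
    for idxs, res, _psx in results:
        sn[list(idxs)] = res
    return sn
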
Example no. 2
def fft_psd_multithreading(args):
    """helper function to parallelize get_noise_fft

    Parameters:
    -----------
    Y: ndarray
        input movie (n_pixels x Time), can be also memory mapped file

    sn_s: ndarray (memory mapped)
        file where to store the results of computation.

    i: int
        pixel index start

    num_pixels: int
        number of pixel to select starting from i

    **kwargs: dict
        arguments to be passed to get_noise_fft

    """
    (Y, i, num_pixels, kwargs) = args
    if isinstance(Y, str):
        # a string argument is interpreted as the path of a memory-mapped movie
        Y, _, _ = load_memmap(Y)

    idxs = range(i, i + num_pixels)
    res, psx = get_noise_fft(Y[idxs], **kwargs)

    return (idxs, res, psx)
Example no. 3
def lars_regression_noise_ipyparallel(pars):

    # imports are local because this function runs inside the ipyparallel engines
    import numpy as np
    import gc

    Y_name, C_name, noise_sn, idxs_C, idxs_Y = pars

    # load only the rows of the movie assigned to this job
    Y, _, _ = load_memmap(Y_name)
    Y = np.array(Y[idxs_Y, :])

    C = np.array(np.load(C_name, mmap_mode='r'))
    _, T = np.shape(C)

    As = []
    for y, px in zip(Y, idxs_Y):
        c = C[idxs_C[px], :]
        if np.size(c) > 0:
            sn = noise_sn[px]**2 * T
            _, _, a, _, _ = lars_regression_noise(y, c.T, 1, sn)
            if not np.isscalar(a):
                a = a.T

            As.append((px, idxs_C[px], a))

    # release references so the engine does not accumulate memory across jobs
    del Y
    del C
    gc.collect()

    return As
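
A minimal dispatch sketch for this worker (the update_spatial_components examples below embed the same pattern); the wrapper name fit_pixels_parallel and its arguments are assumptions standing in for values prepared by the caller, and an ipcluster must already be running.

# hypothetical dispatch helper, assuming an ipcluster is already running
import numpy as np
from ipyparallel import Client


def fit_pixels_parallel(Y_name, C_name, noise_sn, idxs_C, d, n_comps,
                        n_pixels_per_process=128):
    client = Client()
    dview = client[:]

    # one job per group of contiguous pixels
    groups = [(Y_name, C_name, noise_sn, idxs_C,
               range(i, i + n_pixels_per_process))
              for i in range(0, d - n_pixels_per_process + 1,
                             n_pixels_per_process)]
    chunks = dview.map_sync(lars_regression_noise_ipyparallel, groups)

    # scatter the per-pixel coefficients back into a dense matrix
    A = np.zeros((d, n_comps))
    for chunk in chunks:
        for px, idxs_, a in chunk:
            A[px, idxs_] = a
    return A
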
Example no. 4
def lars_regression_noise_ipyparallel(pars):

    # imports are local because this function runs inside the ipyparallel engines
    import numpy as np
    import gc

    Y_name, C_name, noise_sn, idxs_C, idxs_Y = pars

    # load only the rows of the movie assigned to this job
    Y, _, _ = load_memmap(Y_name)
    Y = np.array(Y[idxs_Y, :])

    C = np.array(np.load(C_name, mmap_mode='r'))
    _, T = np.shape(C)

    As = []
    for y, px in zip(Y, idxs_Y):
        c = C[idxs_C[px], :]
        if np.size(c) > 0:
            sn = noise_sn[px]**2 * T
            _, _, a, _, _ = lars_regression_noise(y, c.T, 1, sn)
            if not np.isscalar(a):
                a = a.T

            As.append((px, idxs_C[px], a))

    # release references so the engine does not accumulate memory across jobs
    del Y
    del C
    gc.collect()

    return As
Example no. 5
def cnmf_patches(args_in):
    # imports are local because this function runs inside the ipyparallel engines
    import numpy as np
    import ca_source_extraction as cse
    import logging

    file_name, idx_, shapes, options = args_in

    # one log file per patch, tagged with the pixel range it covers
    name_log = file_name[:-5] + '_LOG_ ' + str(idx_[0]) + '_' + str(idx_[-1])
    logger = logging.getLogger(name_log)
    hdlr = logging.FileHandler('./' + name_log)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)

    p = options['temporal_params']['p']

    logger.info('START')
    Yr, _, _, _ = load_memmap(file_name)
    logger.info('Read file')

    # restrict the memory-mapped movie to the pixels of this patch
    Yr = Yr[idx_, :]
    Yr.filename = file_name
    d, T = Yr.shape
    Y = np.reshape(Yr, (shapes[1], shapes[0], T), order='F')
    Y.filename = file_name

    [d1, d2, T] = Y.shape
    options['spatial_params']['d2'] = d1
    options['spatial_params']['d1'] = d2

    Yr, sn, g, psx = cse.pre_processing.preprocess_data(
        Yr, **options['preprocess_params'])
    logger.info('Preprocess Data')

    Ain, Cin, b_in, f_in, center = cse.initialization.initialize_components(
        Y, **options['init_params'])
    logger.info('Initialize Components')

    A, b, Cin = cse.spatial.update_spatial_components(
        Yr, Cin, f_in, Ain, sn=sn, **options['spatial_params'])
    # set p to zero for fast updating without deconvolution
    options['temporal_params']['p'] = 0
    logger.info('Spatial Update')

    C, f, S, bl, c1, neurons_sn, g, YrA = cse.temporal.update_temporal_components(
        Yr, A, b, Cin, f_in, bl=None, c1=None, sn=None, g=None,
        **options['temporal_params'])
    logger.info('Temporal Update')

    A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = cse.merging.merge_components(
        Yr, A, b, C, f, S, sn, options['temporal_params'],
        options['spatial_params'], bl=bl, c1=c1, sn=neurons_sn, g=g,
        thr=options['merging']['thr'], fast_merge=True)
    logger.info('Merge Components')

    A2, b2, C2 = cse.spatial.update_spatial_components(
        Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])
    logger.info('Update Spatial II')
    # restore p to perform the full deconvolution in the final temporal update
    options['temporal_params']['p'] = p
    C2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = cse.temporal.update_temporal_components(
        Yr, A2, b2, C2, f, bl=None, c1=None, sn=None, g=None,
        **options['temporal_params'])
    logger.info('Update Temporal II')

    # drop references to the (potentially large) movie before returning
    Y = []
    Yr = []

    return idx_, shapes, A2, b2, C2, f2, S2, bl2, c12, neurons_sn2, g21, sn, options
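
A sketch of how this patch worker might be driven, assuming a partitioning step has produced patch_indices (the pixel indices of each patch) and patch_shapes, and that dview is a view on an ipyparallel client; the wrapper name is illustrative, not part of the original code.

# hypothetical driver: one argument tuple per patch
def run_cnmf_patches(dview, file_name, patch_indices, patch_shapes, options):
    # build the argument tuples in the layout cnmf_patches unpacks
    args_in = [(file_name, idx_patch, shape_patch, options)
               for idx_patch, shape_patch in zip(patch_indices, patch_shapes)]
    # each result mirrors the tuple returned above:
    # (idx_, shapes, A2, b2, C2, f2, S2, bl2, c12, neurons_sn2, g21, sn, options)
    return dview.map_sync(cnmf_patches, args_in)
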
Example no. 6
def cnmf_patches(args_in):
    import numpy as np
    import os
    import scipy.sparse
    from scipy.sparse import coo_matrix
    import ca_source_extraction as cse
    import logging

    file_name, idx_, shapes, options = args_in

    # one log file per patch; basename avoids writing into the movie's directory
    name_log = os.path.basename(file_name[:-5]) + '_LOG_ ' + str(
        idx_[0]) + '_' + str(idx_[-1])
    logger = logging.getLogger(name_log)
    hdlr = logging.FileHandler('./' + name_log)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)

    p = options['temporal_params']['p']

    logger.info('START')

    logger.info('Read file')
    Yr, _, _ = load_memmap(file_name)

    Yr = Yr[idx_, :]

    # skip patches with (almost) no signal, e.g. pixels outside the imaged area
    if (np.sum(np.abs(np.diff(Yr)))) > 0.1:

        Yr.filename = file_name
        d, T = Yr.shape
        Y = np.reshape(Yr, (shapes[1], shapes[0], T), order='F')
        Y.filename = file_name

        [d1, d2, T] = Y.shape

        options['spatial_params']['dims'] = (d1, d2)
        logger.info('Preprocess Data')
        Yr, sn, g, psx = cse.pre_processing.preprocess_data(
            Yr, **options['preprocess_params'])

        logger.info('Initialize Components')
        Ain, Cin, b_in, f_in, center = cse.initialization.initialize_components(
            Y, **options['init_params'])
        # energy (sum of squares) of each spatial component, used for normalization
        nA = np.squeeze(np.array(np.sum(np.square(Ain), axis=0)))
        nr = len(nA)
        Cin = coo_matrix(Cin)

        YA = (Ain.T.dot(Yr).T) * scipy.sparse.spdiags(1. / nA, 0, nr, nr)
        AA = ((Ain.T.dot(Ain)) * scipy.sparse.spdiags(1. / nA, 0, nr, nr))
        YrA = YA - Cin.T.dot(AA)
        Cin = Cin.todense()

        if options['patch_params']['only_init']:

            return idx_, shapes, coo_matrix(
                Ain
            ), b_in, Cin, f_in, None, None, None, None, g, sn, options, YrA.T

        else:

            logger.info('Spatial Update')
            A, b, Cin = cse.spatial.update_spatial_components(
                Yr, Cin, f_in, Ain, sn=sn, **options['spatial_params'])
            options['temporal_params'][
                'p'] = 0  # set this to zero for fast updating without deconvolution

            logger.info('Temporal Update')
            C, f, S, bl, c1, neurons_sn, g, YrA = cse.temporal.update_temporal_components(
                Yr,
                A,
                b,
                Cin,
                f_in,
                bl=None,
                c1=None,
                sn=None,
                g=None,
                **options['temporal_params'])

            logger.info('Merge Components')
            A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = cse.merging.merge_components(
                Yr,
                A,
                b,
                C,
                f,
                S,
                sn,
                options['temporal_params'],
                options['spatial_params'],
                bl=bl,
                c1=c1,
                sn=neurons_sn,
                g=g,
                thr=options['merging']['thr'],
                fast_merge=True)

            logger.info('Update Spatial II')
            A2, b2, C2 = cse.spatial.update_spatial_components(
                Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])

            logger.info('Update Temporal II')
            options['temporal_params'][
                'p'] = p  # set it back to original value to perform full deconvolution
            C2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = cse.temporal.update_temporal_components(
                Yr,
                A2,
                b2,
                C2,
                f,
                bl=None,
                c1=None,
                sn=None,
                g=None,
                **options['temporal_params'])

            Y = []
            Yr = []

            logger.info('Done!')
            return idx_, shapes, A2, b2, C2, f2, S2, bl2, c12, neurons_sn2, g21, sn, options, YrA

    else:
        return None
Example no. 7
def update_spatial_components(Y, C, f, A_in, sn=None, d1=None, d2=None, min_size=3, max_size=8, dist=3, 
                              method='ellipse', expandCore=None, backend='single_thread', n_processes=4, n_pixels_per_process=128 ):
    """update spatial footprints and background through Basis Pursuit Denoising

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D)
        movie, raw data in 2D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile of background activity.
    A_in: np.ndarray
        current estimate of the spatial footprints (pixels x neurons).

    d1: [optional] int
        x movie dimension

    d2: [optional] int
        y movie dimension

    min_size: [optional] int
        minimum size of the ellipse axes used to expand the search region

    max_size: [optional] int
        maximum size of the ellipse axes used to expand the search region

    dist: [optional] int
        expansion factor of the ellipse


    sn: [optional] np.ndarray
        noise standard deviation associated with each pixel, if known

    n_processes: [optional] int
        number of processes to use when the backend is ipyparallel

    backend: [optional] str
        'ipyparallel' or 'single_thread'
        single_thread: no parallelization, suitable for small datasets
        ipyparallel: sends jobs to the nodes of a running ipython cluster


    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion


    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)

    """
    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if d1 is None or d2 is None:
        raise Exception('You need to define the input dimensions')
    
    # check the string case (a file name) before touching ndim
    if not isinstance(Y, str) and Y.ndim < 2:
        Y = np.atleast_2d(Y)
        
    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    C = np.atleast_2d(C)
    if C.shape[1] == 1:
        raise Exception('Dimension of Matrix C must be neurons x time')

    f = np.atleast_2d(f)
    if f.shape[1] == 1:
        raise Exception('Dimension of Matrix f must be background components x time')

    if len(A_in.shape) == 1:
        A_in = np.atleast_2d(A_in).T

    if A_in.shape[0] == 1:
        raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    Cf = np.vstack((C, f))  # create matrix that include background components

    [d, T] = np.shape(Y)    

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.')

    nr, _ = np.shape(C)       # number of neurons
    
    IND = determine_search_location(
        A_in, d1, d2, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore)
    print " find search location"


    # for each pixel: indices of the searched components plus the background components
    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    folder = tempfile.mkdtemp()

    # use the ipyparallel package, you need to start a cluster server
    # (ipcluster command) in order to use it
    if backend == 'ipyparallel':

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if type(Y) is np.core.memmap:  # if input file is already memory mapped then find the filename
            Y_name = Y.filename            
        # if not create a memory mapped version (necessary for parallelization)
        elif type(Y) is str:
            Y_name = Y            
        else:
            Y_name = os.path.join(folder, 'Y_temp.npy')
            np.save(Y_name, Y)            
            Y, _, _, _ = load_memmap(Y_name)

        # create arguments to be passed to the function. Here we are grouping
        # bunch of pixels to be processed by each thread
        pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
                        for i in range(0, d1 * d2 - n_pixels_per_process + 1, n_pixels_per_process)]
        # pick up the remainder pixels when d1 * d2 is not a multiple of n_pixels_per_process
        if (d1 * d2) % n_pixels_per_process != 0:
            i = (d1 * d2 // n_pixels_per_process) * n_pixels_per_process
            pixel_groups.append((Y_name, C_name, sn, ind2_, range(i, d1 * d2)))

        A_ = np.zeros((d, nr + np.size(f, 0)))
    
        try:  # if server is not running and raise exception if not installed or not started
            from ipyparallel import Client
            c = Client()
        except:
            print "this backend requires the ipyparallel package (pip install ipyparallel) and a running cluster (ipcluster start -n 6, where 6 is the number of nodes)"
            raise

        if len(c) < n_processes:
            print len(c)
            raise Exception(
                "the number of nodes in the cluster is less than the required number of processes: decrease the n_processes parameter to a suitable value")

        dview = c[:n_processes]  # use the number of processes
        # serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel, pixel_groups)

        # write the regression coefficients of each pixel into the output matrix
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a

        # clean up the cluster state
        dview.results.clear()
        c.purge_results('all')
        c.purge_everything()
        c.close()

    elif backend == 'single_thread':

        Cf_ = [Cf[idx_, :] for idx_ in ind2_]

        #% LARS regression
        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))

        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
            if px % 1000 == 0:
                print px
            if np.size(c) > 0:
                _, _, a, _, _ = lars_regression_noise(y, np.array(c.T), 1, sn[px]**2 * T)
                if np.isscalar(a):
                    A_[px, id2_] = a
                else:
                    A_[px, id2_] = a.T

    else:
        raise Exception(
            'Unknown backend specified: use single_thread or ipyparallel')
    
    #%
    print 'Updated Spatial Components'
   
    A_ = threshold_components(A_, d1, d2)

    print "threshold"
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)
    

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)
    
    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print "Computing A_bas"
    A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    b = A_bas

    print("--- %s seconds ---" % (time.time() - start_time))

    try:  # clean up
        # remove the temporary files created for the parallel processing
        print "Remove temporary file created"
        shutil.rmtree(folder)
    except:
        raise Exception("Failed to delete: " + folder)

    return A_, b, C
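
A minimal call sketch for the single-thread backend, with all inputs assumed to come from the preprocessing and initialization steps shown in the cnmf_patches examples above; the wrapper name is illustrative, not part of the original code.

# hypothetical wrapper around the call, with inputs produced by earlier steps
def refine_spatial(Yr, Cin, f_in, Ain, sn, d1, d2):
    # Yr is the (pixels x time) movie; d1, d2 are the field-of-view dimensions
    A, b, C = update_spatial_components(Yr, Cin, f_in, Ain, sn=sn,
                                        d1=d1, d2=d2,
                                        backend='single_thread',
                                        n_pixels_per_process=128)
    return A, b, C
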
Example no. 8
def update_spatial_components(Y,
                              C=None,
                              f=None,
                              A_in=None,
                              sn=None,
                              dims=None,
                              min_size=3,
                              max_size=8,
                              dist=3,
                              method='ellipse',
                              expandCore=None,
                              dview=None,
                              n_pixels_per_process=128,
                              medw=(3, 3),
                              thr_method='nrg',
                              maxthr=0.1,
                              nrgthr=0.9999,
                              extract_cc=True,
                              se=np.ones((3, 3), dtype=int),
                              ss=np.ones((3, 3), dtype=int),
                              nb=1):
    """update spatial footprints and background through Basis Pursuit Denoising

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile of background activity.
    A_in: np.ndarray
        current estimate of the spatial footprints (pixels x neurons). If A_in is boolean it defines the
        spatial support of A; otherwise the support is derived from it through determine_search_location

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int
        minimum size of the ellipse axes used to expand the search region

    max_size: [optional] int
        maximum size of the ellipse axes used to expand the search region

    dist: [optional] int
        expansion factor of the ellipse


    sn: [optional] np.ndarray
        noise standard deviation associated with each pixel, if known

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: [optional] view on ipyparallel client
        you need to create an ipyparallel client and pass a view on the processors
        (client = Client(), dview = client[:]); if None the update runs on a single thread

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details
        
    nb: [optional] int
        Number of background components

    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)

    """

    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1),
                                       2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    # check the string case (a file name) before touching ndim
    if not isinstance(Y, str) and Y.ndim < 2:
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    if C is not None:
        C = np.atleast_2d(C)
        if C.shape[1] == 1:
            raise Exception('Dimension of Matrix C must be neurons x time')

    if f is not None:
        f = np.atleast_2d(f)
        if f.shape[1] == 1:
            raise Exception(
                'Dimension of Matrix f must be background comps x time ')

    if (A_in is None) and (C is None):
        raise Exception('Either A or C need to be determined')

    if A_in is not None:
        if len(A_in.shape) == 1:
            A_in = np.atleast_2d(A_in).T

        if A_in.shape[0] == 1:
            raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    [d, T] = np.shape(Y)

    if A_in is None:
        # by default every pixel may contribute to every component (pixels x neurons)
        A_in = np.ones((d, np.shape(C)[0]), dtype=bool)

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.'
        )

    if f is not None:
        nb = f.shape[0]
    # if f is None, nb keeps its default and f is estimated below via NMF

    if A_in.dtype == bool:
        IND = A_in.copy()
        print "spatial support for each components given by the user"
        if C is None:
            INDav = IND.astype('float32') / np.sum(IND, axis=0)
            px = (np.sum(IND, axis=1) > 0)
            # estimate the background from pixels outside every component support
            model = NMF(n_components=nb, init='random', random_state=0)
            b = model.fit_transform(np.maximum(Y[~px, :], 0))
            f = np.atleast_2d(model.components_.squeeze())
            # least-squares baseline for all pixels given the background time course f
            Y_resf = np.dot(Y, f.T)
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
            C = np.fmax(
                csr_matrix(INDav.T).dot(Y) - np.outer(INDav.T.dot(b), f), 0)

    else:
        IND = determine_search_location(A_in,
                                        dims,
                                        method=method,
                                        min_size=min_size,
                                        max_size=max_size,
                                        dist=dist,
                                        expandCore=expandCore,
                                        dview=dview)
        print "found spatial support for each component"
        if C is None:
            raise Exception('You need to provide estimate of C and f')

    print 'A_in shape: ' + str(np.shape(A_in))

    Cf = np.vstack((C, f))  # create matrix that include background components
    nr, _ = np.shape(C)  # number of neurons

    ind2_ = [
        np.hstack(
            (np.where(iid_)[0], nr +
             np.arange(f.shape[0]))) if np.size(np.where(iid_)[0]) > 0 else []
        for iid_ in IND
    ]

    if os.environ.get('SLURM_SUBMIT_DIR') is not None:
        tmpf = os.environ.get('SLURM_SUBMIT_DIR')
        print 'cluster temporary folder: ' + tmpf
        folder = tempfile.mkdtemp(dir=tmpf)
    else:
        folder = tempfile.mkdtemp()

    # use the ipyparallel package, you need to start a cluster server
    # (ipcluster command) in order to use it

    C_name = os.path.join(folder, 'C_temp.npy')
    np.save(C_name, Cf)

    # if the input is already memory mapped, reuse its file name
    if type(Y) is np.core.memmap:
        Y_name = Y.filename
    # a string is itself a file name; with no dview the array can be used directly
    elif type(Y) is str or dview is None:
        Y_name = Y
    else:
        # an in-memory array cannot be shared with the remote engines
        raise Exception('Not implemented consistently')

    # group pixels into chunks; each chunk becomes one job
    pixel_groups = []
    for i in range(0,
                   np.prod(dims) - n_pixels_per_process + 1,
                   n_pixels_per_process):
        pixel_groups.append(
            [Y_name, C_name, sn, ind2_,
             range(i, i + n_pixels_per_process)])

    # pick up the remainder pixels not covered by the full-sized chunks
    i = (np.prod(dims) // n_pixels_per_process) * n_pixels_per_process
    if i < np.prod(dims):
        pixel_groups.append(
            [Y_name, C_name, sn, ind2_,
             range(i, np.prod(dims))])

    A_ = np.zeros((d, nr + np.size(f, 0)))

    #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
    if dview is not None:
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel,
                                         pixel_groups)
        dview.results.clear()
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
    else:
        # fall back to a serial LARS regression over all pixels

        Cf_ = [Cf[idx_, :] for idx_ in ind2_]

        #% LARS regression
        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))

        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
            if px % 1000 == 0:
                print px
            if np.size(c) > 0:
                _, _, a, _, _ = lars_regression_noise(y, np.array(c.T), 1,
                                                      sn[px]**2 * T)
                if np.isscalar(a):
                    A_[px, id2_] = a
                else:
                    A_[px, id2_] = a.T

#%
    print 'Updated Spatial Components'

    A_ = threshold_components(A_,
                              dims,
                              dview=dview,
                              medw=medw,
                              thr_method=thr_method,
                              maxthr=maxthr,
                              nrgthr=nrgthr,
                              extract_cc=extract_cc,
                              se=se,
                              ss=ss)

    print "threshold"
    ff = np.where(np.sum(A_, axis=0) == 0)  # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print "Computing A_bas"
    A_bas = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))),
                    0)  # update baseline based on residual
    b = A_bas

    print("--- %s seconds ---" % (time.time() - start_time))

    try:  # clean up
        # remove the temporary files created for the parallel processing
        print "Remove temporary file created"
        shutil.rmtree(folder)
    except:
        raise Exception("Failed to delete: " + folder)

    if A_in.dtype == bool:
        return A_, b, C, f
    else:
        return A_, b, C
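
This variant is driven through dview, as its docstring prescribes: create an ipyparallel client and pass a view on the workers. A minimal sketch, assuming Yr is the memory-mapped movie and the remaining inputs come from the earlier steps; the wrapper name is illustrative, not part of the original code.

# hypothetical parallel call; requires a running ipcluster (ipcluster start)
from ipyparallel import Client


def refine_spatial_parallel(Yr, Cin, f_in, Ain, sn, d1, d2):
    client = Client()
    dview = client[:]  # view on all workers, as the docstring prescribes
    A, b, C = update_spatial_components(Yr, C=Cin, f=f_in, A_in=Ain, sn=sn,
                                        dims=(d1, d2), dview=dview,
                                        n_pixels_per_process=128)
    return A, b, C
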
Example no. 9
def cnmf_patches(args_in):
    # imports are local because this function runs inside the ipyparallel engines
    import numpy as np
    import ca_source_extraction as cse
    import logging

    file_name, idx_, shapes, options = args_in

    name_log = file_name[:-5] + '_LOG_ ' + str(idx_[0]) + '_' + str(idx_[-1])
    logger = logging.getLogger(name_log)
    hdlr = logging.FileHandler('./' + name_log)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)

    p = options['temporal_params']['p']

    logger.info('START')
    Yr, _, _, _ = load_memmap(file_name)
    logger.info('Read file')

    Yr = Yr[idx_, :]
    Yr.filename = file_name
    d, T = Yr.shape
    Y = np.reshape(Yr, (shapes[1], shapes[0], T), order='F')
    Y.filename = file_name

    [d1, d2, T] = Y.shape
    options['spatial_params']['d2'] = d1
    options['spatial_params']['d1'] = d2

    Yr, sn, g, psx = cse.pre_processing.preprocess_data(
        Yr, **options['preprocess_params'])
    logger.info('Preprocess Data')

    Ain, Cin, b_in, f_in, center = cse.initialization.initialize_components(
        Y, **options['init_params'])
    logger.info('Initialize Components')

    A, b, Cin = cse.spatial.update_spatial_components(
        Yr, Cin, f_in, Ain, sn=sn, **options['spatial_params'])
    options['temporal_params'][
        'p'] = 0  # set this to zero for fast updating without deconvolution
    logger.info('Spatial Update')

    C, f, S, bl, c1, neurons_sn, g, YrA = cse.temporal.update_temporal_components(
        Yr,
        A,
        b,
        Cin,
        f_in,
        bl=None,
        c1=None,
        sn=None,
        g=None,
        **options['temporal_params'])
    logger.info('Temporal Update')

    A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = cse.merging.merge_components(
        Yr,
        A,
        b,
        C,
        f,
        S,
        sn,
        options['temporal_params'],
        options['spatial_params'],
        bl=bl,
        c1=c1,
        sn=neurons_sn,
        g=g,
        thr=options['merging']['thr'],
        fast_merge=True)
    logger.info('Merge Components')

    A2, b2, C2 = cse.spatial.update_spatial_components(
        Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])
    logger.info('Update Spatial II')
    options['temporal_params'][
        'p'] = p  # set it back to original value to perform full deconvolution
    C2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = cse.temporal.update_temporal_components(
        Yr,
        A2,
        b2,
        C2,
        f,
        bl=None,
        c1=None,
        sn=None,
        g=None,
        **options['temporal_params'])
    logger.info('Update Temporal II')
    # drop references to the (potentially large) movie before returning
    Y = []
    Yr = []

    return idx_, shapes, A2, b2, C2, f2, S2, bl2, c12, neurons_sn2, g21, sn, options
Example no. 10
def update_spatial_components(Y,
                              C,
                              f,
                              A_in,
                              sn=None,
                              d1=None,
                              d2=None,
                              min_size=3,
                              max_size=8,
                              dist=3,
                              method='ellipse',
                              expandCore=None,
                              backend='single_thread',
                              n_processes=4,
                              n_pixels_per_process=128):
    """update spatial footprints and background through Basis Pursuit Denoising

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C + b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D)
        movie, raw data in 2D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile of background activity.
    A_in: np.ndarray
        current estimate of the spatial footprints (pixels x neurons).

    d1: [optional] int
        x movie dimension

    d2: [optional] int
        y movie dimension

    min_size: [optional] int
        minimum size of the ellipse axes used to expand the search region

    max_size: [optional] int
        maximum size of the ellipse axes used to expand the search region

    dist: [optional] int
        expansion factor of the ellipse


    sn: [optional] np.ndarray
        noise standard deviation associated with each pixel, if known

    n_processes: [optional] int
        number of processes to use when the backend is ipyparallel

    backend: [optional] str
        'ipyparallel' or 'single_thread'
        single_thread: no parallelization, suitable for small datasets
        ipyparallel: sends jobs to the nodes of a running ipython cluster


    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion


    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)

    """
    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1),
                                       2).astype(int)

    if d1 is None or d2 is None:
        raise Exception('You need to define the input dimensions')

    # check the string case (a file name) before touching ndim
    if not isinstance(Y, str) and Y.ndim < 2:
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    C = np.atleast_2d(C)
    if C.shape[1] == 1:
        raise Exception('Dimension of Matrix C must be neurons x time')

    f = np.atleast_2d(f)
    if f.shape[1] == 1:
        raise Exception('Dimension of Matrix f must be background components x time')

    if len(A_in.shape) == 1:
        A_in = np.atleast_2d(A_in).T

    if A_in.shape[0] == 1:
        raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    Cf = np.vstack((C, f))  # create matrix that include background components

    [d, T] = np.shape(Y)

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.'
        )

    nr, _ = np.shape(C)  # number of neurons

    IND = determine_search_location(A_in,
                                    d1,
                                    d2,
                                    method=method,
                                    min_size=min_size,
                                    max_size=max_size,
                                    dist=dist,
                                    expandCore=expandCore)
    print " find search location"

    ind2_ = [
        np.hstack(
            (np.where(iid_)[0], nr +
             np.arange(f.shape[0]))) if np.size(np.where(iid_)[0]) > 0 else []
        for iid_ in IND
    ]

    folder = tempfile.mkdtemp()

    # use the ipyparallel package, you need to start a cluster server
    # (ipcluster command) in order to use it
    if backend == 'ipyparallel':

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if type(
                Y
        ) is np.core.memmap:  # if input file is already memory mapped then find the filename
            Y_name = Y.filename
        # if not create a memory mapped version (necessary for parallelization)
        elif type(Y) is str:
            Y_name = Y
        else:
            Y_name = os.path.join(folder, 'Y_temp.npy')
            np.save(Y_name, Y)
            Y, _, _, _ = load_memmap(Y_name)

        # create arguments to be passed to the function. Here we are grouping
        # a bunch of pixels to be processed by each worker
        pixel_groups = [(Y_name, C_name, sn, ind2_,
                         range(i, i + n_pixels_per_process))
                        for i in range(0, d1 * d2 - n_pixels_per_process +
                                       1, n_pixels_per_process)]
        # pick up the remainder pixels when d1 * d2 is not a multiple of n_pixels_per_process
        if (d1 * d2) % n_pixels_per_process != 0:
            i = (d1 * d2 // n_pixels_per_process) * n_pixels_per_process
            pixel_groups.append((Y_name, C_name, sn, ind2_, range(i, d1 * d2)))

        A_ = np.zeros((d, nr + np.size(f, 0)))

        try:  # if server is not running and raise exception if not installed or not started
            from ipyparallel import Client
            c = Client()
        except:
            print "this backend requires the ipyparallel package (pip install ipyparallel) and a running cluster (ipcluster start -n 6, where 6 is the number of nodes)"
            raise

        if len(c) < n_processes:
            print len(c)
            raise Exception(
                "the number of nodes in the cluster is less than the required number of processes: decrease the n_processes parameter to a suitable value"
            )

        dview = c[:n_processes]  # use the number of processes
        #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel,
                                         pixel_groups)
        # write the regression coefficients of each pixel into the output matrix
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a

        # clean up the cluster state
        dview.results.clear()
        c.purge_results('all')
        c.purge_everything()
        c.close()

    elif backend == 'single_thread':

        Cf_ = [Cf[idx_, :] for idx_ in ind2_]

        #% LARS regression
        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))

        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
            if px % 1000 == 0:
                print px
            if np.size(c) > 0:
                _, _, a, _, _ = lars_regression_noise(y, np.array(c.T), 1,
                                                      sn[px]**2 * T)
                if np.isscalar(a):
                    A_[px, id2_] = a
                else:
                    A_[px, id2_] = a.T

    else:
        raise Exception(
            'Unknown backend specified: use single_thread or ipyparallel')

    #%
    print 'Updated Spatial Components'

    A_ = threshold_components(A_, d1, d2)

    print "threshold"
    ff = np.where(np.sum(A_, axis=0) == 0)  # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print "Computing A_bas"
    A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2,
                    0)  # update baseline based on residual
    b = A_bas

    print("--- %s seconds ---" % (time.time() - start_time))

    try:  # clean up
        # remove the temporary files created for the parallel processing
        print "Remove temporary file created"
        shutil.rmtree(folder)
    except:
        raise Exception("Failed to delete: " + folder)

    return A_, b, C