def concat_convert_from_mmap(full_file_paths, reshape=True):

    folder_path, mmap_path_start = os.path.split(full_file_paths[0])
    mmap_path_end = os.path.split(full_file_paths[-1])[1]
    mmap_path_out = mmap_path_start[:11] + mmap_path_end[:11]
    print('starting chunk with', mmap_path_start)

    # load the first array
    frames_0, dims, T = mmp.load_memmap(full_file_paths[0])
    concatenated = np.array(frames_0)
    dims_list = [dims]
    T_list = [T]

    # append the remaining arrays along the time axis (axis=1)
    for file_name in full_file_paths[1:]:
        frames, dims, T = mmp.load_memmap(file_name)
        concatenated = np.concatenate((concatenated, np.array(frames)), axis=1)
        dims_list.append(dims)
        T_list.append(T)

    if reshape:
        print('reshaping')
        concatenated = caiman_mmap_to_hdf(concatenated, dims)

    output_fname = save_to_hdf(folder_path, mmap_path_out, concatenated)
    print(mmap_path_out, 'done')
    return output_fname
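A minimal usage sketch, assuming a folder of CaImAn-style .mmap chunks; the glob pattern and paths are illustrative, not part of the snippet above:

import glob

# hypothetical chunk files, sorted so they concatenate in temporal order
chunk_paths = sorted(glob.glob('/data/session1/*.mmap'))
if chunk_paths:
    concat_convert_from_mmap(chunk_paths, reshape=True)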
Example #2
def test_load_raises_multiple_ext():
    fname = "a.mmap.mma"
    try:
        mmapping.load_memmap(fname)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError for a file with multiple extensions"
Example #3
def convert_from_mmap(full_file_path):
    folder_path, mmap_path = os.path.split(full_file_path)
    frames, dims, T = mmp.load_memmap(full_file_path)
    frames_array = np.array(frames)
    output_fname = save_to_hdf(folder_path, mmap_path, frames_array)
    print(mmap_path, 'done')
    return output_fname
Example #4
def fft_psd_multithreading(args):
    """Helper function to parallelize get_noise_fft.

    Parameters
    ----------
    Y: ndarray or str
        input movie (n_pixels x time); can also be the path to a memory-mapped file

    i: int
        index of the first pixel to process

    num_pixels: int
        number of pixels to select starting from i

    kwargs: dict
        arguments to be passed to get_noise_fft

    """
    (Y, i, num_pixels, kwargs) = args
    if isinstance(Y, str):  # a path was passed: reload the memory-mapped movie
        Y, _, _ = load_memmap(Y)

    idxs = list(range(i, i + num_pixels))
    print(len(idxs))
    res, psx = get_noise_fft(Y[idxs], **kwargs)

    return (idxs, res, psx)
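A hedged driver sketch, assuming get_noise_fft is importable and a CaImAn .mmap file exists; the file name, chunk size, and empty kwargs dict are illustrative:

from multiprocessing import Pool

# pass the .mmap path rather than the array so each worker reloads it cheaply
fname = 'Yr_d1_64_d2_64_d3_1_order_C_frames_1000_.mmap'  # hypothetical
n_pixels, chunk = 64 * 64, 1024
args_list = [(fname, i, min(chunk, n_pixels - i), {}) for i in range(0, n_pixels, chunk)]
with Pool() as pool:
    results = pool.map(fft_psd_multithreading, args_list)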
Example #6
def function_place_holder(args_in):

    file_name, idx_, shapes, function, args, kwargs = args_in
    Yr, _, _ = load_memmap(file_name)
    Yr = Yr[idx_, :]
    Yr.filename = file_name
    d, T = Yr.shape
    # rebuild the movie as (time, x, y) from the flattened pixels x time array
    Y = np.reshape(Yr, (shapes[1], shapes[0], T), order='F').transpose([2, 0, 1])
    [T, d1, d2] = Y.shape

    res_fun = function(Y, *args, **kwargs)
    if not isinstance(res_fun, tuple):

        if res_fun.shape == (d1, d2):
            print('** reshaping from 2D to 1D')
            res_fun = np.reshape(res_fun, d1 * d2, order='F')

    return res_fun
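A sketch of the expected argument tuple, with a hypothetical file name (the callable receives the movie as time x X x Y):

args_in = ('Yr_d1_64_d2_64_d3_1_order_C_frames_100_.mmap',  # hypothetical mmap path
           np.arange(64 * 64),                              # flat pixel indices
           (64, 64),                                        # patch shape
           np.mean,                                         # applied to the (T, x, y) movie
           (), {'axis': 0})                                 # extra args / kwargs
result = function_place_holder(args_in)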
Example #7
def save_portion(pars):

    big_mov, d, tot_frames, fnames, idx_start, idx_end = pars
    # reopen the big memmap read/write and fill in the pixel rows [idx_start, idx_end)
    big_mov = np.memmap(big_mov, mode='r+', dtype=np.float32, shape=(d, tot_frames), order='C')
    Ttot = 0
    Yr_tot = np.zeros((idx_end - idx_start, tot_frames))
    print(Yr_tot.shape)
    for f in fnames:
        print(f)
        Yr, dims, T = load_memmap(f)
        print((idx_start, idx_end))
        Yr_tot[:, Ttot:Ttot + T] = np.array(Yr[idx_start:idx_end])
        Ttot = Ttot + T
        del Yr

    big_mov[idx_start:idx_end, :] = Yr_tot
    del Yr_tot
    print('done')
    del big_mov
    return Ttot
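A hedged driver sketch for save_portion, assuming a preallocated output memmap of shape (d, tot_frames); all names and sizes are illustrative:

d, tot_frames = 4096, 3000                      # illustrative geometry
big_name = 'big_order_C_frames_3000_.mmap'      # hypothetical, preallocated float32 memmap
fnames = ['chunk1.mmap', 'chunk2.mmap']         # hypothetical per-session chunk files
step = 1024
pars = [(big_name, d, tot_frames, fnames, i, min(i + step, d)) for i in range(0, d, step)]
for p in pars:
    save_portion(p)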
Example #9
def dot_place_holder(par):
    from caiman.mmapping import load_memmap
    import pickle

    A_name, idx_to_pass, b_, transpose = par
    A_, _, _ = load_memmap(A_name)
    b_ = pickle.loads(b_)

    if 'sparse' in str(type(b_)):
        if transpose:
            outp = (b_.T.tocsc()[:, idx_to_pass].dot(A_[idx_to_pass])).T
            del b_
            return idx_to_pass, outp
        else:
            outp = (b_.T.dot(A_[idx_to_pass].T)).T
            del b_
            return idx_to_pass, outp
    else:
        if transpose:
            outp = A_[idx_to_pass].dot(b_[idx_to_pass])
            del b_
            return idx_to_pass, outp
        else:
            outp = A_[idx_to_pass].dot(b_)
            del b_
            return idx_to_pass, outp
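A small usage sketch, assuming an existing CaImAn .mmap file; the file name and shapes are hypothetical:

import pickle
import numpy as np
import scipy.sparse

A_name = 'Yr_d1_64_d2_64_d3_1_order_C_frames_100_.mmap'  # hypothetical, pixels x time
b = scipy.sparse.random(100, 5, density=0.1).tocsc()     # time x components factor
idx = np.arange(1024)                                    # pixel rows to process
idx_out, block = dot_place_holder((A_name, idx, pickle.dumps(b), False))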
Example #10
def dot_place_holder(par):
    from caiman.mmapping import load_memmap
    import pickle

    A_name, idx_to_pass, b_, transpose = par
    A_, _, _ = load_memmap(A_name)
    b_ = pickle.loads(b_)

    print(idx_to_pass[-1])

    if 'sparse' in str(type(b_)):
        if transpose:
            return idx_to_pass, (b_.T.tocsc()[:, idx_to_pass].dot(A_[idx_to_pass])).T
        else:
            return idx_to_pass, (b_.T.dot(A_[idx_to_pass].T)).T
    else:
        if transpose:
            return idx_to_pass, A_[idx_to_pass].dot(b_[idx_to_pass])
        else:
            return idx_to_pass, A_[idx_to_pass].dot(b_)
Example #11
def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3, normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True,
                              se=np.ones((3, 3), dtype=int), ss=np.ones((3, 3), dtype=int), nb=1, method_ls='nnls_L0'):
    """update spatial footprints and background through Basis Pursuit Denoising 

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C - b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        initial estimate of the spatial footprints. If A_in is boolean then it defines the spatial support of A. 
        Otherwise it is used to determine it through determine_search_location

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int


    sn: [optional] np.ndarray
        noise associated with each pixel if known

    backend [optional] str
        'ipyparallel', 'single_thread'
        single_thread:no parallelization. It can be used with small datasets.
        ipyparallel: uses ipython clusters and then send jobs to each of them
        SLURM: use the slurm scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])            

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls:
        method to perform the regression for the basis pursuit denoising.
             'nnls_L0'. Nonnegative least square with L0 penalty        
             'lasso_lars' lasso lars function from scikit learn
             'lasso_lars_old' lasso lars from old implementation, will be deprecated 

    normalize_yyt_one: [optional] bool
        whether to normalize the C and A matrices so that diag(C*C.T) are ones

    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)
    f: np.ndarray
        same as f_in except if empty component deleted.

    """
    C = np.array(C)
    if normalize_yyt_one:
        #        cct=np.diag(C.dot(C.T))
        nr_C = np.shape(C)[0]
        d = scipy.sparse.lil_matrix((nr_C, nr_C))
        d.setdiag(np.sqrt(np.sum(C**2, 1)))
        A_in = A_in * d
        C = old_div(C, np.sqrt(np.sum(C**2, 1)[:, np.newaxis]))

    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    if Y.ndim < 2 and not isinstance(Y, str):
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    if C is not None:
        C = np.atleast_2d(C)
        if C.shape[1] == 1:
            raise Exception('Dimension of Matrix C must be neurons x time')

    if f is not None:
        f = np.atleast_2d(f)
        if f.shape[1] == 1:
            raise Exception('Dimension of Matrix f must be background comps x time ')

    if (A_in is None) and (C is None):
        raise Exception('Either A or C need to be determined')

    if A_in is not None:
        if len(A_in.shape) == 1:
            A_in = np.atleast_2d(A_in).T

        if A_in.shape[0] == 1:
            raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    [d, T] = np.shape(Y)

    if A_in is None:
        A_in = np.ones((d, np.shape(C)[1]), dtype=bool)

    if n_pixels_per_process > d:
        print(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decreasing suitably.')
        n_pixels_per_process = d

    if f is not None:
        nb = f.shape[0]

    if A_in.dtype == bool:
        IND = A_in.copy()
        print("spatial support for each components given by the user")
        if C is None:
            INDav = old_div(IND.astype('float32'), np.sum(IND, axis=0))
            px = (np.sum(IND, axis=1) > 0)
            model = NMF(n_components=nb, init='random', random_state=0)
            b = model.fit_transform(np.maximum(Y[~px, :], 0))
            f = model.components_.squeeze()
            #f = np.mean(Y[~px,:],axis=0)
            Y_resf = np.dot(Y, f.T)
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
            #b = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)
            C = np.fmax(csr_matrix(INDav.T).dot(Y) - np.outer(INDav.T.dot(b), f), 0)
            f = np.atleast_2d(f)

    else:
        IND = determine_search_location(
            A_in, dims, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore, dview=dview)
        print("found spatial support for each component")
        if C is None:
            raise Exception('You need to provide estimate of C and f')

    print((np.shape(A_in)))

    Cf = np.vstack((C, f))  # create matrix that include background components
    nr, _ = np.shape(C)       # number of neurons

    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    if os.environ.get('SLURM_SUBMIT_DIR') is not None:
        tmpf = os.environ.get('SLURM_SUBMIT_DIR')
        print(('cluster temporary folder:' + tmpf))
        folder = tempfile.mkdtemp(dir=tmpf)
    else:
        folder = tempfile.mkdtemp()

    if dview is None:

        Y_name = Y
        C_name = Cf

    else:

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if isinstance(Y, np.memmap):  # if input file is already memory mapped then find the filename
            Y_name = Y.filename
        # if not create a memory mapped version (necessary for parallelization)
        elif isinstance(Y, str) or dview is None:
            Y_name = Y
        else:
            raise Exception('Not implemented consistently')
            Y_name = os.path.join(folder, 'Y_temp.npy')
            np.save(Y_name, Y)
            Y, _, _, _ = load_memmap(Y_name)

    # create arguments to be passed to the function. Here we are grouping
    # bunches of pixels to be processed by each thread
    cct = np.diag(C.dot(C.T))
    rank_f = nb
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i, i + n_pixels_per_process)), method_ls, cct, rank_f])

    if i + n_pixels_per_process < np.prod(dims):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(
            range(i + n_pixels_per_process, np.prod(dims))), method_ls, cct, rank_f])

    A_ = np.zeros((d, nr + np.size(f, 0)))
    print('Starting Update Spatial Components')

    #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
    if dview is not None:
        parallel_result = dview.map_sync(regression_ipyparallel, pixel_groups)
        dview.results.clear()
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a

    #%
    print('Updated Spatial Components')

    A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method,
                              maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc, se=se, ss=ss)

    print("threshold")
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating {} empty components!!'.format(len(ff)))
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)
        background_ff = list(filter(lambda i: i > 0, ff-nr))
        f = np.delete(f, background_ff, 0)
        nr = nr - (len(ff) - len(background_ff))
        nb = nb - len(background_ff)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y, f.T, block_size=1000, dview=dview) - \
            A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    print("Computing A_bas")
    A_bas = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)  # update baseline based on residual
    # A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # baseline based on residual
    b = A_bas

    print(("--- %s seconds ---" % (time.time() - start_time)))

    try:  # clean up
        # remove temporary file created
        print("Remove temporary file created")
        shutil.rmtree(folder)

    except:

        raise Exception("Failed to delete: " + folder)


    return A_, b, C, f
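A hedged call sketch, assuming Yr is a pixels x time memmap and C, f, A_init, sn come from an earlier initialization; the movie dimensions and option values are illustrative:

A2, b2, C2, f2 = update_spatial_components(
    Yr, C=C, f=f, A_in=A_init, sn=sn, dims=(64, 64),
    method_ls='lasso_lars', n_pixels_per_process=1024, dview=None)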
Example #12
def regression_ipyparallel(pars):

    # need to import since it is run from within the server
    import numpy as np
    import sys
    import gc
    from sklearn import linear_model

    Y_name, C_name, noise_sn, idxs_C, idxs_Y, method_least_square, cct, rank_f = pars

    if isinstance(Y_name, str):
        Y, _, _ = load_memmap(Y_name)
        Y = np.array(Y[idxs_Y, :])
    else:
        Y = Y_name[idxs_Y, :]

    if isinstance(C_name, str):
        C = np.load(C_name, mmap_mode='r')
        C = np.array(C)
    else:
        C = C_name

    _, T = np.shape(C)
    As = []
    print('updating lars')
    for y, px in zip(Y, idxs_Y):
        c = C[idxs_C[px], :]
        idx_only_neurons = idxs_C[px]
        cct_ = cct[idx_only_neurons[:-rank_f]]

        if np.size(c) > 0:
            sn = noise_sn[px]**2 * T

            if method_least_square == 'lasso_lars_old':  # lasso lars from old implementation, will be deprecated

                a = lars_regression_noise_old(y, c.T, 1, sn)[2]

            elif method_least_square == 'nnls_L0':  # Nonnegative least square with L0 penalty
                a = nnls_L0(c.T, y, 1.2 * sn)

            elif method_least_square == 'lasso_lars':  # lasso lars function from scikit learn
                lambda_lasso = .5 * noise_sn[px] * np.sqrt(np.max(cct_)) / T
                clf = linear_model.LassoLars(alpha=lambda_lasso, positive=True)
                a_lrs = clf.fit(np.array(c.T), np.ravel(y))
                a = a_lrs.coef_

            else:
                raise Exception('Least Square Method not found!' + method_least_square)

            if not np.isscalar(a):
                a = a.T

            As.append((px, idxs_C[px], a))

    print('clearing variables')
    if isinstance(Y_name, str):
        del Y

    if isinstance(C_name, str):
        del C

    if isinstance(Y_name, str):
        gc.collect()
    print('done!')
    return As
Example #13
def load(file_name, fr=30, start_time=0, meta_data=None, subindices=None, shape=None, num_frames_sub_idx=np.inf):
    '''
    load movie from file.

    Parameters
    ----------
    file_name: string
        name of file. Possible extensions are tif, avi, npy, (npz and hdf5 are usable only if saved by calblitz)
    fr: float
        frame rate
    start_time: float
        initial time for frame 1
    meta_data: dict
        same as for calblitz.movie
    subindices: iterable indexes
        for loading only portion of the movie
    shape: tuple of two values
        dimension of the movie along x and y if loading from a two dimensional numpy array
    num_frames_sub_idx: int
        number of frames to read when loading from a .sbx file

    Returns
    -------
    mov: calblitz.movie

    '''

    # case we load movie from file
    if os.path.exists(file_name):

        name, extension = os.path.splitext(file_name)[:2]

        if extension == '.tif' or extension == '.tiff':  # load tif file
            if subindices is not None:
                input_arr = imread(file_name)[subindices, :, :]
            else:
                input_arr = imread(file_name)
            input_arr = np.squeeze(input_arr)


        elif extension == '.avi': # load avi file
            if subindices is not None:
                raise Exception('Subindices not implemented')
            cap = cv2.VideoCapture(file_name)
            try:
                length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                width  = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            except:
                print('Rolling back to opencv 2')
                length = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
                width  = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))

            input_arr = np.zeros((length, height, width), dtype=np.uint8)
            counter = 0
            while True:
                # Capture frame-by-frame
                ret, frame = cap.read()
                if not ret:
                    break
                input_arr[counter] = frame[:, :, 0]
                counter = counter + 1
                if not counter % 100:
                    print(counter)

            # When everything done, release the capture
            cap.release()
            cv2.destroyAllWindows()

        elif extension == '.npy':  # load npy file
            if subindices is not None:
                input_arr = np.load(file_name)[subindices]
            else:
                input_arr = np.load(file_name)
            if input_arr.ndim == 2:
                if shape is not None:
                    d, T = np.shape(input_arr)
                    d1, d2 = shape
                    input_arr = np.transpose(np.reshape(input_arr, (d1, d2, T), order='F'), (2, 0, 1))
                else:
                    raise Exception('Loaded vector is 2D, you need to provide the shape parameter')

        elif extension == '.mat':  # load mat file
            input_arr = loadmat(file_name)['data']
            input_arr = np.rollaxis(input_arr, 2, -3)
            if subindices is not None:
                input_arr = input_arr[subindices]


        elif extension == '.npz': # load movie from saved file
            if subindices is not None:
                raise Exception('Subindices not implemented')
            with np.load(file_name) as f:
                return movie(**f)

        elif extension == '.hdf5':
            with h5py.File(file_name, "r") as f:
                attrs = dict(f['mov'].attrs)
                if 'meta_data' in attrs:
                    attrs['meta_data'] = cpk.loads(attrs['meta_data'])

                if subindices is None:
                    return movie(f['mov'],**attrs)
                else:
                    return movie(f['mov'][subindices],**attrs)
        elif extension == '.mmap':

            filename = os.path.split(file_name)[-1]
            fpart = filename.split('_')[1:-1]  # the name encodes dims, order and frame count

            Yr, dims, T = load_memmap(file_name)
            images = np.reshape(Yr.T, [T] + list(dims), order='F')

            print('mmap')
            return movie(images, fr=fr)

        elif extension == '.sbx':

            print('sbx')

            return movie(sbxread(file_name[:-4], num_frames_sub_idx).transpose([0, 3, 2, 1]), fr=fr)


        else:
            raise Exception('Unknown file type')
    else:
        raise Exception('File not found!')

    return movie(input_arr, fr=fr, start_time=start_time, file_name=os.path.split(file_name)[-1], meta_data=meta_data)
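A brief usage sketch of load; the file names are hypothetical:

m = load('movie.tif', fr=30)                              # tif stack -> movie object
m_sub = load('movie.tif', fr=30, subindices=range(100))   # first 100 frames only
m2d = load('flat.npy', fr=15, shape=(512, 512))           # 2D pixels x time array needs shape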
Example #14
        else:
            pars.append([f, os.path.splitext(f)[0], resize_fact[idx], remove_init, idx_xy, order, xy_shifts[idx], add_to_movie, border_to_0])

    if dview is not None:
        fnames_new = dview.map_sync(save_place_holder, pars)
    else:
        fnames_new = list(map(save_place_holder, pars))

    return fnames_new
#%%
def save_memmap_join(mmap_fnames, base_name=None, n_chunks=6, dview=None, async_=False):  # 'async' is reserved in Python 3.7+

    tot_frames = 0
    order = 'C'
    for f in mmap_fnames:
        Yr, dims, T = load_memmap(f)
        print((f, T))
        tot_frames += T
        del Yr

    d = np.prod(dims)

    if base_name is None:
        base_name = mmap_fnames[0]
        base_name = base_name[:base_name.find('_d1_')] + '-#-' + str(len(mmap_fnames))

    fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + \
        str(1 if len(dims) == 2 else dims[2]) + '_order_' + str(order) + '_frames_' + str(tot_frames) + '_.mmap'
    fname_tot = os.path.join(os.path.split(mmap_fnames[0])[0], fname_tot)

    print(fname_tot)
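For reference, a hedged walk-through of the file-name convention built above, with illustrative values:

dims, order, tot_frames = (512, 512), 'C', 3000   # illustrative values
name = ('chunk' + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' +
        str(1 if len(dims) == 2 else dims[2]) + '_order_' + order +
        '_frames_' + str(tot_frames) + '_.mmap')
print(name)  # chunk_d1_512_d2_512_d3_1_order_C_frames_3000_.mmap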
Example #15
def cnmf_patches(args_in):
    import numpy as np
    import caiman as cm
    import time
    import logging
    from caiman.source_extraction.cnmf import cnmf as cnmf

    #    file_name, idx_,shapes,p,gSig,K,fudge_fact=args_in
    file_name, idx_, shapes, options = args_in

    name_log = os.path.basename(file_name[:-5]) + '_LOG_ ' + str(
        idx_[0]) + '_' + str(idx_[-1])
    logger = logging.getLogger(name_log)
    hdlr = logging.FileHandler('./' + name_log)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)

    p = options['temporal_params']['p']

    logger.info('START')

    logger.info('Read file')
    Yr, _, _ = load_memmap(file_name)

    Yr = Yr[idx_, :]

    if (np.sum(np.abs(np.diff(Yr)))) > 0.1:

        #Yr.filename=file_name
        d, T = Yr.shape

        #        Y=np.reshape(Yr,(shapes[1],shapes[0],T),order='F')
        #        Y.filename=file_name

        #        dims = shapes[1],shapes[0]
        dims = shapes  #shapes[1],shapes[0],shapes[2]
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        #images.filename=file_name

        cnm = cnmf.CNMF(n_processes=1, k=options['init_params']['K'],
                        gSig=options['init_params']['gSig'],
                        merge_thresh=options['merging']['thr'], p=p, dview=None,
                        Ain=None, Cin=None, f_in=None, do_merge=True,
                        ssub=options['init_params']['ssub'],
                        tsub=options['init_params']['tsub'],
                        p_ssub=1, p_tsub=1,
                        method_init=options['init_params']['method'],
                        alpha_snmf=options['init_params']['alpha_snmf'],
                        rf=None, stride=None, memory_fact=1,
                        gnb=options['init_params']['nb'],
                        only_init_patch=options['patch_params']['only_init'],
                        method_deconvolution=options['temporal_params']['method'],
                        n_pixels_per_process=options['preprocess_params']['n_pixels_per_process'],
                        block_size=options['temporal_params']['block_size'],
                        check_nan=options['preprocess_params']['check_nan'],
                        skip_refinement=options['patch_params']['skip_refinement'],
                        N_iterations_refinement=options['patch_params']['nIter'])

        cnm = cnm.fit(images)

        Yr = []
        images = []

        return idx_, shapes, scipy.sparse.coo_matrix(
            cnm.A
        ), cnm.b, cnm.C, cnm.f, cnm.S, cnm.bl, cnm.c1, cnm.neurons_sn, cnm.g, cnm.sn, cnm.options, cnm.YrA.T

    else:
        return None
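A hedged sketch of the per-patch argument tuple consumed above; the file name is hypothetical, and options must carry the nested CaImAn parameter groups ('init_params', 'temporal_params', 'merging', 'preprocess_params', 'patch_params'):

args_in = ('Yr_d1_64_d2_64_d3_1_order_C_frames_100_.mmap',  # hypothetical patch file
           np.arange(64 * 64),                              # flat pixel indices of this patch
           (64, 64),                                        # patch shape
           options)                                         # nested parameter dict
out = cnmf_patches(args_in)  # None if the patch is empty, otherwise the fitted results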
Example #16
def cnmf_patches(args_in):
    import numpy as np
    import caiman as cm
    import time
    import logging

    #    file_name, idx_,shapes,p,gSig,K,fudge_fact=args_in
    file_name, idx_, shapes, options = args_in

    name_log = os.path.basename(file_name[:-5]) + '_LOG_ ' + str(
        idx_[0]) + '_' + str(idx_[-1])
    logger = logging.getLogger(name_log)
    hdlr = logging.FileHandler('./' + name_log)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)

    p = options['temporal_params']['p']

    logger.info('START')

    logger.info('Read file')
    Yr, _, _ = load_memmap(file_name)

    Yr = Yr[idx_, :]

    if (np.sum(np.abs(np.diff(Yr)))) > 0.1:

        Yr.filename = file_name
        d, T = Yr.shape

        Y = np.reshape(Yr, (shapes[1], shapes[0], T), order='F')
        Y.filename = file_name

        [d1, d2, T] = Y.shape

        options['spatial_params']['dims'] = (d1, d2)
        logger.info('Preprocess Data')
        Yr, sn, g, psx = cm.source_extraction.cnmf.pre_processing.preprocess_data(
            Yr, **options['preprocess_params'])

        logger.info('Initialize Components')

        Ain, Cin, b_in, f_in, center = cm.source_extraction.cnmf.initialization.initialize_components(
            Y, **options['init_params'])

        nA = np.squeeze(np.array(np.sum(np.square(Ain), axis=0)))

        nr = nA.size
        Cin = coo_matrix(Cin)

        YA = (Ain.T.dot(Yr).T) * scipy.sparse.spdiags(old_div(1., nA), 0, nr,
                                                      nr)
        AA = ((Ain.T.dot(Ain)) *
              scipy.sparse.spdiags(old_div(1., nA), 0, nr, nr))
        YrA = YA - Cin.T.dot(AA)
        Cin = Cin.todense()

        if options['patch_params']['only_init']:

            return idx_, shapes, coo_matrix(
                Ain
            ), b_in, Cin, f_in, None, None, None, None, g, sn, options, YrA.T

        else:

            logger.info('Spatial Update')
            A, b, Cin = cm.source_extraction.cnmf.spatial.update_spatial_components(
                Yr, Cin, f_in, Ain, sn=sn, **options['spatial_params'])

            options['temporal_params'][
                'p'] = 0  # set this to zero for fast updating without deconvolution

            logger.info('Temporal Update')
            C, f, S, bl, c1, neurons_sn, g, YrA = cm.source_extraction.cnmf.temporal.update_temporal_components(
                Yr,
                A,
                b,
                Cin,
                f_in,
                bl=None,
                c1=None,
                sn=None,
                g=None,
                **options['temporal_params'])

            options['temporal_params'][
                'p'] = p  # set it back to original value to perform full deconvolution

            logger.info('Merge Components')
            A, C, nr, merged_ROIs, S, bl, c1, neurons_sn, g = cm.source_extraction.cnmf.merging.merge_components(
                Yr,
                A,
                b,
                C,
                f,
                S,
                sn,
                options['temporal_params'],
                options['spatial_params'],
                bl=bl,
                c1=c1,
                sn=neurons_sn,
                g=g,
                thr=options['merging']['thr'],
                fast_merge=True)

            for it in range(options['patch_params']['nIter']):

                logger.info('Starting Iteration ' + str(it))

                # spatial update
                A, b, C = cm.source_extraction.cnmf.spatial.update_spatial_components(
                    Yr, C, f, A, sn=sn, **options['spatial_params'])

                # temporal update
                C, f, S, bl, c1, neurons_sn, g, YrA = cm.source_extraction.cnmf.temporal.update_temporal_components(
                    Yr,
                    A,
                    b,
                    C,
                    f,
                    bl=None,
                    c1=None,
                    sn=None,
                    g=None,
                    **options['temporal_params'])

                # merge
                A, C, nr, merged_ROIs, S, bl, c1, neurons_sn, g = cm.source_extraction.cnmf.merging.merge_components(
                    Yr,
                    A,
                    b,
                    C,
                    f,
                    S,
                    sn,
                    options['temporal_params'],
                    options['spatial_params'],
                    bl=bl,
                    c1=c1,
                    sn=neurons_sn,
                    g=g,
                    thr=options['merging']['thr'],
                    mx=50,
                    fast_merge=True)

            Y = []
            Yr = []

            logger.info('Done!')
            return idx_, shapes, A, b, C, f, S, bl, c1, neurons_sn, g, sn, options, YrA

    else:
        return None
Example #17
def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, min_size=3, max_size=8, dist=3,normalize_yyt_one=True,
                              method='ellipse', expandCore=None, dview=None, n_pixels_per_process=128,
                              medw=(3, 3), thr_method='nrg', maxthr=0.1, nrgthr=0.9999, extract_cc=True,
                              se=np.ones((3, 3), dtype=int), ss=np.ones((3, 3), dtype=int), nb=1, method_ls='nnls_L0'):

    """update spatial footprints and background through Basis Pursuit Denoising 

    for each pixel i solve the problem
        [A(i,:),b(i)] = argmin sum(A(i,:))
    subject to
        || Y(i,:) - A(i,:)*C - b(i)*f || <= sn(i)*sqrt(T);

    for each pixel the search is limited to a few spatial components

    Parameters
    ----------
    Y: np.ndarray (2D or 3D)
        movie, raw data in 2D or 3D (pixels x time).
    C: np.ndarray
        calcium activity of each neuron.
    f: np.ndarray
        temporal profile  of background activity.
    A_in: np.ndarray
        initial estimate of the spatial footprints. If A_in is boolean then it defines the spatial support of A. 
        Otherwise it is used to determine it through determine_search_location

    dims: [optional] tuple
        x, y[, z] movie dimensions

    min_size: [optional] int

    max_size: [optional] int

    dist: [optional] int


    sn: [optional] np.ndarray
        noise associated with each pixel if known

    backend [optional] str
        'ipyparallel', 'single_thread'
        single_thread:no parallelization. It can be used with small datasets.
        ipyparallel: uses ipython clusters and then send jobs to each of them
        SLURM: use the slurm scheduler

    n_pixels_per_process: [optional] int
        number of pixels to be processed by each thread


    method: [optional] string
        method used to expand the search for pixels 'ellipse' or 'dilate'

    expandCore: [optional]  scipy.ndimage.morphology
        if method is dilate this represents the kernel used for expansion

    dview: view on ipyparallel client
            you need to create an ipyparallel client and pass a view on the processors (client = Client(), dview=client[:])            

    medw, thr_method, maxthr, nrgthr, extract_cc, se, ss: [optional]
        Parameters for components post-processing. Refer to spatial.threshold_components for more details

    nb: [optional] int
        Number of background components

    method_ls:
        method to perform the regression for the basis pursuit denoising.
             'nnls_L0'. Nonnegative least square with L0 penalty        
             'lasso_lars' lasso lars function from scikit learn
             'lasso_lars_old' lasso lars from old implementation, will be deprecated 

    normalize_yyt_one: [optional] bool
        whether to normalize the C and A matrices so that diag(C*C.T) are ones

    Returns
    --------
    A: np.ndarray
         new estimate of spatial footprints
    b: np.ndarray
        new estimate of spatial background
    C: np.ndarray
         temporal components (updated only when spatial components are completely removed)


    """
    C = np.array(C)
    if normalize_yyt_one:
        nr_C = np.shape(C)[0]
        d = scipy.sparse.lil_matrix((nr_C, nr_C))
        d.setdiag(np.sqrt(np.sum(C**2, 1)))
        A_in = A_in * d
        C = old_div(C, np.sqrt(np.sum(C**2, 1)[:, np.newaxis]))


    if expandCore is None:
        expandCore = iterate_structure(generate_binary_structure(2, 1), 2).astype(int)

    if dims is None:
        raise Exception('You need to define the input dimensions')

    if Y.ndim < 2 and not isinstance(Y, str):
        Y = np.atleast_2d(Y)

    if Y.shape[1] == 1:
        raise Exception('Dimension of Matrix Y must be pixels x time')

    if C is not None:
        C = np.atleast_2d(C)
        if C.shape[1] == 1:
            raise Exception('Dimension of Matrix C must be neurons x time')

    if f is not None:
        f = np.atleast_2d(f)
        if f.shape[1] == 1:
            raise Exception('Dimension of Matrix f must be background comps x time ')

    if (A_in is None) and (C is None):
        raise Exception('Either A or C need to be determined')

    if A_in is not None:
        if len(A_in.shape) == 1:
            A_in = np.atleast_2d(A_in).T

        if A_in.shape[0] == 1:
            raise Exception('Dimension of Matrix A must be pixels x neurons ')

    start_time = time.time()

    [d, T] = np.shape(Y)

    if A_in is None:
        A_in = np.ones((d, np.shape(C)[1]), dtype=bool)

    if n_pixels_per_process > d:
        raise Exception(
            'The number of pixels per process (n_pixels_per_process) is larger than the total number of pixels!! Decrease suitably.')

    if f is not None:
        nb = f.shape[0]

    if A_in.dtype == bool:
        IND = A_in.copy()
        print("spatial support for each components given by the user")
        if C is None:
            INDav = old_div(IND.astype('float32'), np.sum(IND, axis=0))
            px = (np.sum(IND, axis=1) > 0)
            model = NMF(n_components=nb, init='random', random_state=0)
            b = model.fit_transform(np.maximum(Y[~px, :], 0))
            f = model.components_.squeeze()
            #f = np.mean(Y[~px,:],axis=0)
            Y_resf = np.dot(Y, f.T)
            b = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)
            #b = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)
            C = np.fmax(csr_matrix(INDav.T).dot(Y) - np.outer(INDav.T.dot(b), f), 0)
            f = np.atleast_2d(f)

    else:
        IND = determine_search_location(
            A_in, dims, method=method, min_size=min_size, max_size=max_size, dist=dist, expandCore=expandCore, dview=dview)
        print("found spatial support for each component")
        if C is None:
            raise Exception('You need to provide estimate of C and f')

    print((np.shape(A_in)))

    Cf = np.vstack((C, f))  # create matrix that include background components
    nr, _ = np.shape(C)       # number of neurons

    ind2_ = [np.hstack((np.where(iid_)[0], nr + np.arange(f.shape[0])))
             if np.size(np.where(iid_)[0]) > 0 else [] for iid_ in IND]

    if os.environ.get('SLURM_SUBMIT_DIR') is not None:
        tmpf = os.environ.get('SLURM_SUBMIT_DIR')
        print(('cluster temporary folder:' + tmpf))
        folder = tempfile.mkdtemp(dir=tmpf)
    else:
        folder = tempfile.mkdtemp()


    if dview is None:

        Y_name = Y
        C_name = Cf

    else:

        C_name = os.path.join(folder, 'C_temp.npy')
        np.save(C_name, Cf)

        if isinstance(Y, np.memmap):  # if input file is already memory mapped then find the filename
            Y_name = Y.filename
        # if not create a memory mapped version (necessary for parallelization)
        elif isinstance(Y, str) or dview is None:
            Y_name = Y
        else:
            raise Exception('Not implemented consistently')
            Y_name = os.path.join(folder, 'Y_temp.npy')
            np.save(Y_name, Y)
            Y, _, _, _ = load_memmap(Y_name)

    # create arguments to be passed to the function. Here we are grouping
    # bunches of pixels to be processed by each thread
    cct = np.diag(C.dot(C.T))
    rank_f = nb
    pixel_groups = []
    for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(range(i, i + n_pixels_per_process)), method_ls, cct, rank_f])

    if i + n_pixels_per_process < np.prod(dims):
        pixel_groups.append([Y_name, C_name, sn, ind2_, list(range(i + n_pixels_per_process, np.prod(dims))), method_ls, cct, rank_f])

    A_ = np.zeros((d, nr + np.size(f, 0)))
    print('Starting Update Spatial Components')

    #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
    if dview is not None:
        parallel_result = dview.map_sync(regression_ipyparallel, pixel_groups)
        dview.results.clear()
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
    else:
        parallel_result = list(map(regression_ipyparallel, pixel_groups))
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a

    #%
    print('Updated Spatial Components')

    A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method, maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc,
                              se=se, ss=ss)

    print("threshold")
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)

    print("Computing residuals")
    if 'memmap' in str(type(Y)):
        Y_resf = parallel_dot_product(Y, f.T, block_size=5000, dview=dview) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    else:
        Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))

    print("Computing A_bas")
    A_bas = np.fmax(Y_resf.dot(np.linalg.inv(f.dot(f.T))), 0)  # update baseline based on residual
    # A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # baseline based on residual
    b = A_bas

    print(("--- %s seconds ---" % (time.time() - start_time)))

    try:  # clean up
        # remove temporary file created
        print("Remove temporary file created")
        shutil.rmtree(folder)

    except:

        raise Exception("Failed to delete: " + folder)

    if A_in.dtype == bool:

        return A_, b, C, f
    else:
        return A_, b, C
Example #18
def regression_ipyparallel(pars):

    # need to import since it is run from within the server
    import numpy as np
    import sys
    import gc
    from sklearn import linear_model        

    Y_name, C_name, noise_sn, idxs_C, idxs_Y, method_least_square, cct, rank_f = pars

    if isinstance(Y_name, str):
        Y, _, _ = load_memmap(Y_name)
        Y = np.array(Y[idxs_Y, :])
    else:
        Y = Y_name[idxs_Y, :]

    if isinstance(C_name, str):
        C = np.load(C_name, mmap_mode='r')
        C = np.array(C)
    else:
        C = C_name

    _, T = np.shape(C)
    As = []

    for y, px in zip(Y, idxs_Y):
        c = C[idxs_C[px], :]
        idx_only_neurons = idxs_C[px]
        cct_ = cct[idx_only_neurons[:-rank_f]]

        if np.size(c) > 0:
            sn = noise_sn[px]**2 * T

            if method_least_square == 'lasso_lars_old':  # lasso lars from old implementation, will be deprecated

                a = lars_regression_noise_old(y, c.T, 1, sn)[2]

            elif method_least_square == 'nnls_L0':  # nonnegative least squares with L0 penalty
                a = nnls_L0(c.T, y, 1.2 * sn)

            elif method_least_square == 'lasso_lars':  # lasso lars function from scikit learn
                lambda_lasso = .5 * noise_sn[px] * np.sqrt(np.max(cct_)) / T
                clf = linear_model.LassoLars(alpha=lambda_lasso, positive=True)
                a_lrs = clf.fit(np.array(c.T), np.ravel(y))
                a = a_lrs.coef_

            else:
                raise Exception('Least Square Method not found!' + method_least_square)

            if not np.isscalar(a):
                a = a.T

            As.append((px, idxs_C[px], a))

    if isinstance(Y_name, str):
        del Y

    if isinstance(C_name, str):
        del C

    if isinstance(Y_name, str):
        gc.collect()

    return As
Example #19
def test_load_successful_3d():
    fname = pathlib.Path(caiman_datadir()) / "testdata" / THREE_D_FNAME
    Yr, (d1, d2, d3), T = mmapping.load_memmap(str(fname))
    assert (d1, d2, d3) == (10, 11, 13)
    assert T == 12
    assert isinstance(Yr, np.memmap)
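The asserted shape relies on the geometry encoded in the file name; a hypothetical value of the constant, assuming the standard CaImAn naming scheme:

THREE_D_FNAME = 'Yr_d1_10_d2_11_d3_13_order_F_frames_12_.mmap'  # assumed name, matches (10, 11, 13) and T == 12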
Example #20
    n_chunks: number of chunks in which to subdivide when saving; smaller values require more memory
    dview: cluster handle
    async_: sometimes saving will not work asynchronously; try this flag if it fails
    
    Returns:
    --------
    
    '''

    tot_frames = 0
    order = 'C'

    for f in mmap_fnames:
        Yr, dims, T = load_memmap(f)
        print((f, T))
        tot_frames += T
        del Yr

    d = np.prod(dims)

    if base_name is None:
        base_name = mmap_fnames[0]
        base_name = base_name[:base_name.find('_d1_')] + '-#-' + str(len(mmap_fnames))