Code example #1
c, dview, n_processes =\
    cm.cluster.setup_cluster(backend='local', n_processes=None,
                             single_thread=False)

#%% save files to be processed

fnames = ['example_movies/demoMovie.tif']
# location of dataset  (can actually be a list of files to be concatenated)
add_to_movie = -np.min(cm.load(fnames[0], subindices=range(200))).astype(float)
# determine minimum value on a small chunk of data
add_to_movie = np.maximum(add_to_movie, 0)
# if the minimum is negative, this offset makes the data non-negative
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames,
                               dview=dview,
                               base_name=base_name,
                               add_to_movie=add_to_movie)
name_new.sort()
fname_new = cm.save_memmap_join(name_new, base_name='Yr', dview=dview)
#%% LOAD MEMORY MAPPABLE FILE
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')

#%% play movie, press q to quit
play_movie = False
if play_movie:
    cm.movie(images[1400:]).play(fr=50, magnification=4, gain=3.)

#%% correlation image. From here infer neuron size and density
Cn = cm.movie(images).local_correlations(swap_dim=False)
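# Typical inspection of the correlation image (a sketch, not part of the
# original excerpt; assumes matplotlib.pyplot is imported as plt, as in the
# later examples in this collection):
plt.imshow(Cn, cmap='gray')
plt.title('Correlation Image')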
Code example #2
    m_els = m_rig
else:
    fnames = [mc.fname_tot_els]
    border_to_0 = bord_px_els

# if you need to crop the borders use slicing
# idx_x=slice(border_nan,-border_nan,None)
# idx_y=slice(border_nan,-border_nan,None)
# idx_xy=(idx_x,idx_y)
idx_xy = None
add_to_movie = -np.nanmin(m_els[:100]) + 1  # movie must be positive
# if you need to remove frames from the beginning of each file
remove_init = 0
# downsample movie in time: use .2 or .1 if file is large and you want a quick answer
base_name = fname.split('/')[-1][:-4]
name_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name, resize_fact=(
    1, 1, 1), remove_init=remove_init, idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0)
name_new.sort()
print(name_new)

#%% concatenate chunks if needed
if len(name_new) > 1:
    fname_new = cm.save_memmap_join(
        name_new, base_name='Yr', n_chunks=12, dview=dview)
else:
    print('One file only, not saving!')
    fname_new = name_new[0]

t2 = time.time() - t1

#%% LOAD MEMMAP FILE
# fname_new='Yr_d1_501_d2_398_d3_1_order_F_frames_369_.mmap'
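# The excerpt is truncated here; based on the identical step in the other
# examples in this collection, loading the memmap would proceed as follows
# (a sketch, not part of the original source):
Yr, dims, T = cm.load_memmap(fname_new)
images = np.reshape(Yr.T, [T] + list(dims), order='F')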
Code example #3
                                      save_hdf5=False,
                                      remove_blanks=False)
#%%
xy_shifts = []
for fl in new_fls:
    if os.path.exists(fl[:-3] + 'npz'):
        print((fl[:-3] + 'npz'))
        with np.load(fl[:-3] + 'npz') as ld:
            xy_shifts.append(ld['shifts'])
    else:
        raise Exception('*********************** ERROR, FILE DOES NOT EXIST!')
#%%
resize_facts = (1, 1, .2)
name_new = cm.save_memmap_each(new_fls,
                               dview=c[:],
                               base_name=None,
                               resize_fact=resize_facts,
                               remove_init=0,
                               xy_shifts=xy_shifts)
#%%
fname_new = cm.save_memmap_join(name_new,
                                base_name='TOTAL_',
                                n_chunks=6,
                                dview=c[:])
#%%
m = cm.load('TOTAL__d1_512_d2_512_d3_1_order_C_frames_2300_.mmap', fr=6)
#%%
tmp = np.median(m, 0)
#%%
Cn = m.local_correlations(eight_neighbours=True, swap_dim=False)
pl.imshow(Cn, cmap='gray')
#%%
Code example #4
    cm.utils.visualization.view_patches_bar(Yr, A, C, cnm.b, cnm.C_on[:cnm.gnb],
                                            dims[0], dims[1], YrA=cnm.noisyC[cnm.gnb:cnm.M] - C, img=Cn)
#%%
else:  # run offline CNMF algorithm
    #%% start cluster
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

    #%% FOR LOADING ALL TIFF FILES IN A FOLDER AND SAVING THEM TO A SINGLE MEMORY MAPPABLE FILE

    # can actually be a list of movies to concatenate
    fnames = ['example_movies/gmc_980_30mw_00001_green.tif']
    add_to_movie = 0  # the movie must be positive!!!
    downsample_factor = .5  # use .2 or .1 if file is large and you want a quick answer
    base_name = 'Yr'
    name_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name, resize_fact=(
        1, 1, downsample_factor), add_to_movie=add_to_movie)
    name_new.sort()
    fname_new = cm.save_memmap_join(name_new, base_name='Yr', dview=dview)

    #%% LOAD MEMORY MAPPABLE FILE

    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T,), order='F')

    #%% play movie, press q to quit

    play_movie = False
    if play_movie:
        cm.movie(images).play(fr=50, magnification=3, gain=2.)
Code example #5
    raise Exception("Could not find any tiff file")

print(fnames)
fnames = fnames
##%%
#idx_x=slice(12,500,None)
#idx_y=slice(12,500,None)
#idx_xy=(idx_x,idx_y)
add_to_movie = 0  # if movie is too negative, need to add a baseline
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames,
                               dview=dview,
                               base_name=base_name,
                               resize_fact=(1, 1, downsample_factor),
                               remove_init=0,
                               idx_xy=idx_xy,
                               add_to_movie=add_to_movie)
name_new.sort()
print(name_new)

#%%
fname_new = cm.save_memmap_join(name_new,
                                base_name='Yr',
                                n_chunks=12,
                                dview=dview)
#%%
Yr, dims, T = cm.load_memmap(fname_new)
Y = np.reshape(Yr, dims + (T, ), order='F')
#%% visualize correlation image
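# The excerpt is truncated at this cell; the equivalent step elsewhere in
# this collection looks like this (a sketch; assumes pylab is imported as pl):
Cn = cm.local_correlations(Y)
pl.imshow(Cn, cmap='gray')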
Code example #6
def test_general():
    """  General Test of pipeline with comparison against ground truth
    A shorter version than the demo pipeline that calls comparison for the real test work



        Raises:
      ---------
        params_movie

        params_cnmf

        rig correction

        cnmf on patch

        cnmf full frame

        not able to read the file

        no groundtruth


    """
#\bug
#\warning

    global params_movie
    global params_diplay
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']

    cwd = os.getcwd()
    fname = download_demo(fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]


################ RIG CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname, min_mov,
                       max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)  # np.int is deprecated in newer NumPy
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:  # elif not params_movie.has_key('overlaps'):
        fnames = mc.fname_tot_rig
        border_to_0 = bord_px_rig
        m_els = m_rig

    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname.split('/')[-1][:-4]  # fname is a single path string after download_demo
    name_new = cm.save_memmap_each(fnames, base_name=base_name, resize_fact=(
        1, 1, downsample_factor), remove_init=remove_init,
        idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0)
    name_new.sort()

    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(
            name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None)
    else:
        print('One file only, not saving!')
        fname_new = name_new[0]

    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T,), order='F')

    if np.min(images) < 0:
        # TODO: should do this in an automatic fashion with a while loop at line 367
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception(
            'Movie contains nan! You did not remove enough borders')

    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0
    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']

    if params_movie['is_dendrites']:
        if params_movie['init_method'] != 'sparse_nmf':
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')


################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=params_movie['p'],
                    dview=None, rf=rf, stride=stride_cnmf, memory_fact=params_movie['memory_fact'],
                    method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=params_movie[
                        'only_init_patch'],
                    gnb=params_movie['gnb'], method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    cnm = cnm.fit(images)
    A_tot = cnm.A
    C_tot = cnm.C
    YrA_tot = cnm.YrA
    b_tot = cnm.b
    f_tot = cnm.f
    # DISCARDING
    print(('Number of components:' + str(A_tot.shape[-1])))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    # note: both lines below read 'fitness_delta_min_patch'; reading
    # 'fitness_min_patch' for fitness_min was probably intended
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate,
        Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    #######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
#################### ########################


################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape[-1], gSig=gSig, merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot,
                    f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability
    # note: both lines below read 'fitness_delta_min_full'; reading
    # 'fitness_min_full' for fitness_min was probably intended
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min,
        fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min, return_all=True)
    ##########
    # note: this filters the patch-level A_tot/C_tot by the full-frame
    # indices; using cnm.A / cnm.C from the full-frame fit may have been
    # intended
    A_tot_full = A_tot.tocsc()[:, idx_components]
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [
        A_tot_full.copy(), C_tot_full.copy()]
#################### ########################
    comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    try:
        for log_file in log_files:
            os.remove(log_file)
    except OSError:
        print('Cannot remove log files')
############ assertions ##################
    pb = False
    if (comp.information['differences']['params_movie']):
        print("you need to set the same movie parameters as the ground truth to have a real comparison (use the comp.see() function to explore them)")
        pb = True
    if (comp.information['differences']['params_cnm']):
        print("you need to set the same cnmf parameters as the ground truth to have a real comparison (use the comp.see() function to explore them)")
        pb = True
    if (comp.information['diff']['rig']['isdifferent']):
        print("the rigid shifts are different from the ground truth")
        pb = True
    if (comp.information['diff']['cnmpatch']['isdifferent']):
        print("the cnmf on patch produces different results than the ground truth")
        pb = True
    if (comp.information['diff']['cnmfull']['isdifferent']):
        print("the cnmf full frame produces different results than the ground truth")
        pb = True

    assert (not pb)
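
# The messages above reference comp.see(); a minimal sketch of exploring a
# failed comparison using the names from this test:
def explore_failure(comp):
    # print which stages differ from the ground truth, then open the explorer
    for key in ('rig', 'cnmpatch', 'cnmfull'):
        print(key, comp.information['diff'][key]['isdifferent'])
    comp.see()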
Code example #7
    new_fls.append(fl[:-3] + 'tif')
#%%
file_res = cb.motion_correct_parallel(new_fls, fr=6, template=final_template, margins_out=0,
                                      max_shift_w=25, max_shift_h=25, dview=c[:], apply_smooth=True, save_hdf5=False, remove_blanks=False)
#%%
xy_shifts = []
for fl in new_fls:
    if os.path.exists(fl[:-3] + 'npz'):
        print((fl[:-3] + 'npz'))
        with np.load(fl[:-3] + 'npz') as ld:
            xy_shifts.append(ld['shifts'])
    else:
        raise Exception('*********************** ERROR, FILE DOES NOT EXIST!')
#%%
resize_facts = (1, 1, .2)
name_new = cm.save_memmap_each(
    new_fls, dview=c[:], base_name=None, resize_fact=resize_facts, remove_init=0, xy_shifts=xy_shifts)
#%%
fname_new = cm.save_memmap_join(
    name_new, base_name='TOTAL_', n_chunks=6, dview=c[:])
#%%
m = cm.load('TOTAL__d1_512_d2_512_d3_1_order_C_frames_2300_.mmap', fr=6)
#%%
tmp = np.median(m, 0)
#%%
Cn = m.local_correlations(eight_neighbours=True, swap_dim=False)
pl.imshow(Cn, cmap='gray')
#%%
lq, hq = np.percentile(tmp, [10, 98])
pl.imshow(tmp, cmap='gray', vmin=lq, vmax=hq)
#%%
pl.imshow(tmp[10:160, 120:450], cmap='gray', vmin=lq, vmax=hq)
Code example #8
File: cnmf_process.py  Project: dsp-uga/Johnson
def CNMF_PROCESS(tif_movie, _k, _g, _merge):
    """
    Inputs .tif movie (transforming from time series images)
    Applys constrained nonnegative matrix factorization
    Outputs selected neurons sparse matrix and dimension of movie
    """

    dataset_name = tif_movie.replace('.tif', '')

    # start a cluster
    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None, single_thread=False)

    # process movie file
    fnames = [tif_movie]
    # location of dataset  (can actually be a list of files to be concatenated)
    add_to_movie = -np.min(cm.load(fnames[0],
                                   subindices=range(200))).astype(float)
    # determine minimum value on a small chunk of data
    add_to_movie = np.maximum(add_to_movie, 0)
    # if the minimum is negative, this offset makes the data non-negative
    base_name = 'Yr'
    name_new = cm.save_memmap_each(fnames,
                                   dview=dview,
                                   base_name=base_name,
                                   add_to_movie=add_to_movie)
    name_new.sort()
    fname_new = cm.save_memmap_join(name_new, base_name='Yr', dview=dview)
    ### LOAD MEMORY MAPPABLE FILE
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    # ### play movie, press q to quit
    # play_movie = False
    # if play_movie:
    #     cm.movie(images[1400:]).play(fr=50, magnification=4, gain=3.)

    ### correlation image. From here infer neuron size and density
    Cn = cm.movie(images).local_correlations(swap_dim=False)
    plt.imshow(Cn, cmap='gray')
    plt.title('Correlation Image')
    # plt.show()
    plt.savefig('figures/correlation_' + dataset_name + '.png')

    ### set up some parameters
    is_patches = False  # flag for processing in patches or not

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 3  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = _k  # number of neurons expected (in the whole FOV)

    gSig = [_g, _g]  # expected half size of neurons
    merge_thresh = _merge  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    ### Now RUN CNMF
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    gnb=gnb,
                    rf=rf,
                    stride=stride,
                    rolling_sum=False)
    cnm = cnm.fit(images)

    ### plot contour plots of components
    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
    plt.title('Contour plots of components')
    # plt.show()
    plt.savefig('figures/contour_' + dataset_name + '.png')

    ### COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)

    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient
    min_SNR = 2.5  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.90  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.10  # if cnn classifier predicts below this value, reject

    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                         cnm.YrA, fr, decay_time, gSig, dims,
                                         dview=dview, min_SNR=min_SNR,
                                         r_values_min=rval_thr, use_cnn=use_cnn,
                                         thresh_cnn_lowest=min_cnn_thr)

    ### visualize selected and rejected components
    plt.figure()
    plt.subplot(1, 2, 1)
    cm.utils.visualization.plot_contours(cnm.A[:, idx_components], Cn, thr=0.9)
    plt.title('Selected components')
    plt.savefig('figures/selected_' + dataset_name + '.png')
    plt.subplot(1, 2, 2)
    plt.title('Discarded components')
    cm.utils.visualization.plot_contours(cnm.A[:, idx_components_bad],
                                         Cn,
                                         thr=0.9)
    plt.savefig('figures/discarded_' + dataset_name + '.png')
    # plt.show(block=True)

    ### visualize selected components
    cm.utils.visualization.view_patches_bar(Yr,
                                            cnm.A.tocsc()[:, idx_components],
                                            cnm.C[idx_components, :],
                                            cnm.b,
                                            cnm.f,
                                            dims[0],
                                            dims[1],
                                            YrA=cnm.YrA[idx_components, :],
                                            img=Cn)

    ### STOP CLUSTER and clean up log files
    cm.stop_server()

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    dview.terminate()
    return cnm.A.tocsc()[:, idx_components]
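
# Hypothetical usage sketch; the movie path and parameter values below are
# illustrative, not from the original project:
A_selected = CNMF_PROCESS('demoMovie.tif',
                          _k=30,       # neurons expected in the whole FOV
                          _g=6,        # expected half size of neurons
                          _merge=0.8)  # merging threshold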
Code example #9
File: demo_pipeline_cnmfE.py  Project: nhat-le/CaImAn
            save_movie=True,
            # whether to save movie in memory
            # mapped format
            new_templ=new_templ
            # template to initialize motion correction
        )

        filename_reorder = mc.fname_tot_els
        bord_px = np.ceil(
            np.maximum(np.max(np.abs(mc.x_shifts_els)),
                       np.max(np.abs(mc.y_shifts_els)))).astype(int)  # np.int is deprecated in newer NumPy

# create memory mappable file in the right order on the hard drive (C order)
fname_new = cm.save_memmap_each(filename_reorder,
                                base_name='memmap_',
                                order='C',
                                border_to_0=bord_px,
                                dview=dview)
fname_new = cm.save_memmap_join(fname_new, base_name='memmap_', dview=dview)

# load memory mappable file
Yr, dims, T = cm.load_memmap(fname_new)
Y = Yr.T.reshape((T, ) + dims, order='F')
#%% compute some summary images (correlation and peak to noise)
cn_filter, pnr = cm.summary_images.correlation_pnr(
    Y, gSig=gSig, swap_dim=False
)  # change swap dim if output looks weird, it is a problem with tifffile
#%% inspect the summary images and set the parameters
inspect_correlation_pnr(cn_filter, pnr)
#%%
min_corr = .8  # min correlation of peak (from correlation image)
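# The excerpt is truncated here; the CNMF-E examples elsewhere in this
# collection (e.g. #11) pair min_corr with a peak-to-noise threshold,
# sketched here with an illustrative value:
min_pnr = 10  # min peak to noise ratio (from PNR image)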
Code example #10
c, dview, n_processes =\
    cm.cluster.setup_cluster(backend='local', n_processes=None,
                             single_thread=False)

#%% save files to be processed

# This datafile is distributed with Caiman
fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
# location of dataset  (can actually be a list of files to be concatenated)
add_to_movie = -np.min(cm.load(fnames[0], subindices=range(200))).astype(float)
# determine minimum value on a small chunk of data
add_to_movie = np.maximum(add_to_movie, 0)
# if the minimum is negative, this offset makes the data non-negative
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name,
                               add_to_movie=add_to_movie)
name_new.sort()
fname_new = cm.save_memmap_join(name_new, base_name='Yr', dview=dview)
#%% LOAD MEMORY MAPPABLE FILE
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')

#%% play movie, press q to quit
play_movie = False
if play_movie:
    cm.movie(images[1400:]).play(fr=50, magnification=4, gain=3.)

#%% correlation image. From here infer neuron size and density
Cn = cm.movie(images).local_correlations(swap_dim=False)
plt.imshow(Cn, cmap='gray')
Code example #11
def run(batch_dir: str, UUID: str):
    start_time = time()

    output = {'status': 0, 'output_info': ''}
    n_processes = os.environ['_MESMERIZE_N_THREADS']
    n_processes = int(n_processes)
    file_path = batch_dir + '/' + UUID

    filename = file_path + '.tiff'
    input_params = pickle.load(open(file_path + '.params', 'rb'))

    frate = input_params['frate']
    gSig = input_params['gSig']
    gSiz = 3 * gSig + 1
    min_corr = input_params['min_corr']
    min_pnr = input_params['min_pnr']
    min_SNR = input_params['min_SNR']
    r_values_min = input_params['r_values_min']
    decay_time = input_params['decay_time']
    rf = input_params['rf']
    stride = input_params['stride']
    gnb = input_params['gnb']
    nb_patch = input_params['nb_patch']
    k = input_params['k']
    if 'Ain' in input_params.keys():
        if input_params['Ain']:
            print('>> Ain specified, looking for cnm-A file <<')
            item_uuid = input_params['Ain']
            parent_batch_dir = os.environ['CURR_BATCH_DIR']
            item_out_file = os.path.join(parent_batch_dir, f'{item_uuid}.out')
            t0 = time()
            timeout = 60
            while not os.path.isfile(item_out_file):
                print('>>> cnm-A not found, waiting for 15 seconds <<<')
                sleep(15)
                if time() - t0 > timeout:
                    output.update({'status': 0, 'output_info': 'Timeout exceeded while waiting for Ain input file'})
                    raise TimeoutError('Timeout exceeded while waiting for Ain input file')

            if os.path.isfile(item_out_file):
                if json.load(open(item_out_file, 'r'))['status']:
                    Ain_file = os.path.join(parent_batch_dir, item_uuid + '_cnm-A.pikl')
                    Ain = pickle.load(open(Ain_file, 'rb'))
                    print('>>> Found Ain file <<<')
                else:
                    raise FileNotFoundError('>>> Could not find specified Ain file <<<')
        else:
            Ain = None
    else:
        Ain = None

    if 'method_deconvolution' in input_params.keys():
        method_deconvolution = input_params['method_deconvolution']
    else:
        method_deconvolution = 'oasis'

    if 'deconv_flag' in input_params.keys():
        deconv_flag = input_params['deconv_flag']
    else:
        deconv_flag = True

    filename = [filename]

    print('*********** Creating Process Pool ***********')

    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',  # use this one
                                                     n_processes=n_processes,
                                                     single_thread=False)
    if 'bord_px' in input_params.keys():
        bord_px = input_params['bord_px']
    else:
        bord_px = 6

    try:
        print('Creating memmap')
        fname_new = cm.save_memmap_each(
            filename,
            base_name='memmap_' + UUID,
            order='C',
            border_to_0=bord_px,
            dview=dview)
        fname_new = cm.save_memmap_join(fname_new, base_name='memmap_' + UUID, dview=dview)
        # load memory mappable file
        Yr, dims, T = cm.load_memmap(fname_new)
        Y = Yr.T.reshape((T,) + dims, order='F')
        # compute some summary images (correlation and peak to noise)
        # change swap dim if output looks weird, it is a problem with tifffile
        cn_filter, pnr = cm.summary_images.correlation_pnr(
            Y, gSig=gSig, swap_dim=False)
        if not input_params['do_cnmfe'] and input_params['do_corr_pnr']:
            pickle.dump(cn_filter, open(UUID + '_cn_filter.pikl', 'wb'), protocol=4)
            pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)

            output_file_list = [UUID + '_pnr.pikl',
                                UUID + '_cn_filter.pikl',
                                UUID + '_dims.pikl',
                                UUID + '.out'
                                ]

            output.update({'output': UUID,
                           'status': 1,
                           'output_info': 'inspect correlation & pnr',
                           'output_files': output_file_list
                           })

            dview.terminate()

            for mf in glob(batch_dir + '/memmap_*'):
                os.remove(mf)

            end_time = time()
            processing_time = (end_time - start_time) / 60
            output.update({'processing_time': processing_time})

            json.dump(output, open(file_path + '.out', 'w'))

            return

        cnm = cnmf.CNMF(n_processes=n_processes,
                        method_init='corr_pnr',  # use this for 1 photon
                        k=k,  # neurons per patch
                        gSig=(gSig, gSig),  # half size of neuron
                        gSiz=(gSiz, gSiz),  # in general 3*gSig+1
                        merge_thresh=.3,  # threshold for merging
                        p=1,  # order of autoregressive process to fit
                        dview=dview,  # if None it will run on a single thread
                        # downsampling factor in time for initialization, increase if you have memory problems
                        tsub=2,
                        # downsampling factor in space for initialization, increase if you have memory problems
                        ssub=2,
                        # if you want to initialize with some preselected components you can pass them here as boolean vectors
                        Ain=Ain,
                        # half size of the patch (final patch will be 100x100)
                        rf=(rf, rf),
                        # overlap among patches (keep it at least large as 4 times the neuron size)
                        stride=(stride, stride),
                        only_init_patch=True,  # just leave it as is
                        gnb=gnb,  # number of background components
                        nb_patch=nb_patch,  # number of background components per patch
                        method_deconvolution=method_deconvolution,  # could use 'cvxpy' alternatively
                        deconv_flag=deconv_flag,
                        low_rank_background=True,  # leave as is
                        # sometimes setting to False improve the results
                        update_background_components=True,
                        min_corr=min_corr,  # min peak value from correlation image
                        min_pnr=min_pnr,  # min peak to noise ratio from PNR image
                        normalize_init=False,  # just leave as is
                        center_psf=True,  # leave as is for 1 photon
                        del_duplicates=True,  # whether to remove duplicates from initialization
                        border_pix=bord_px)  # number of pixels to not consider in the borders
        cnm.fit(Y)

        #  DISCARD LOW QUALITY COMPONENTS
        idx_components, idx_components_bad, comp_SNR, r_values, pred_CNN = estimate_components_quality_auto(
            Y, cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, frate,
            decay_time, gSig, dims, dview=dview,
            min_SNR=min_SNR, r_values_min=r_values_min, use_cnn=False)

        # np.save(filename[:-5] + '_curves.npy', cnm.C)
        pickle.dump(Yr, open(UUID + '_Yr.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.A, open(UUID + '_cnm-A.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.b, open(UUID + '_cnm-b.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.C, open(UUID + '_cnm-C.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.f, open(UUID + '_cnm-f.pikl', 'wb'), protocol=4)
        pickle.dump(idx_components, open(UUID + '_idx_components.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.YrA, open(UUID + '_cnm-YrA.pikl', 'wb'), protocol=4)
        pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)
        pickle.dump(cn_filter, open(UUID + '_cn_filter.pikl', 'wb'), protocol=4)
        pickle.dump(dims, open(UUID + '_dims.pikl', 'wb'), protocol=4)

        output_file_list = [UUID + '_cnm-A.pikl',
                            UUID + '_Yr.pikl',
                            UUID + '_cnm-b.pikl',
                            UUID + '_cnm-C.pikl',
                            UUID + '_cnm-f.pikl',
                            UUID + '_idx_components.pikl',
                            UUID + '_cnm-YrA.pikl',
                            UUID + '_pnr.pikl',
                            UUID + '_cn_filter.pikl',
                            UUID + '_dims.pikl',
                            UUID + '.out'
                            ]
        output.update({'output': filename[0][:-5],  # filename is a list here
                       'status': 1,
                       'output_files': output_file_list
                       })

    except Exception as e:
        output.update({'status': 0, 'output_info': traceback.format_exc()})

    dview.terminate()

    for mf in glob(batch_dir + '/memmap_*'):
        os.remove(mf)

    end_time = time()
    processing_time = (end_time - start_time) / 60
    output.update({'processing_time': processing_time})

    json.dump(output, open(file_path + '.out', 'w'))
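
# Hypothetical invocation sketch; the thread count, directory and UUID are
# illustrative only:
os.environ['_MESMERIZE_N_THREADS'] = '4'
run('/path/to/batch_dir', '00000000-0000-0000-0000-000000000000')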
Code example #12
            max_deviation_rigid=max_deviation_rigid,
            dview=dview,
            splits_rig=None,
            save_movie=True,  # whether to save movie in memory mapped format
            new_templ=new_templ  # template to initialize motion correction
        )

        filename_reorder = mc.fname_tot_els
        bord_px = np.ceil(
            np.maximum(np.max(np.abs(mc.x_shifts_els)),
                       np.max(np.abs(mc.y_shifts_els)))).astype(int)  # np.int is deprecated in newer NumPy

# create memory mappable file in the right order on the hard drive (C order)
fname_new = cm.save_memmap_each(
    filename_reorder,
    base_name='memmap_',
    order='C',
    border_to_0=bord_px,
    dview=dview)
fname_new = cm.save_memmap_join(fname_new, base_name='memmap_', dview=dview)


# load memory mappable file
Yr, dims, T = cm.load_memmap(fname_new)
Y = Yr.T.reshape((T,) + dims, order='F')
#%% compute some summary images (correlation and peak to noise)
# change swap dim if output looks weird, it is a problem with tifffile
cn_filter, pnr = cm.summary_images.correlation_pnr(Y, gSig=gSig, swap_dim=False)
# inspect the summary images and set the parameters
inspect_correlation_pnr(cn_filter, pnr)
# print parameters set above, modify them if necessary based on summary images
print(min_corr) # min correlation of peak (from correlation image)
Code example #13
File: cnmf.py  Project: gucky92/haussmeister
def process_data(haussio_data,
                 mask=None,
                 p=2,
                 nrois_init=400,
                 roi_iceberg=0.9):
    if mask is not None:
        raise RuntimeError("mask not supported in cnmf.process_data")

    fn_cnmf = haussio_data.dirname_comp + '_cnmf.mat'
    shapefn = os.path.join(haussio_data.dirname_comp,
                           haussio.THOR_RAW_FN[:-3] + "shape.npy")
    shape = np.load(shapefn)
    if len(shape) == 5:
        d1, d2 = shape[2], shape[3]
        fn_mmap = get_mmap_name('Yr', shape[2], shape[3], shape[0])
    else:
        d1, d2 = shape[1], shape[2]
        fn_mmap = get_mmap_name('Yr', shape[1], shape[2], shape[0])
    fn_mmap = os.path.join(haussio_data.dirname_comp, fn_mmap)
    print(fn_mmap, os.path.exists(fn_mmap), d1, d2)

    if not os.path.exists(fn_cnmf):
        # fn_raw = os.path.join(haussio_data.dirname_comp, haussio.THOR_RAW_FN)
        fn_sima = haussio_data.dirname_comp + '.sima'
        fnames = [
            fn_sima,
        ]
        fnames.sort()
        print(fnames)
        fnames = fnames

        final_frate = 1.0 / haussio_data.dt
        downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
        final_frate *= downsample_factor

        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)

        idx_xy = None
        base_name = 'Yr'
        name_new = cm.save_memmap_each(fnames,
                                       dview=dview,
                                       base_name=base_name,
                                       resize_fact=(1, 1, downsample_factor),
                                       remove_init=0,
                                       idx_xy=idx_xy)
        name_new.sort()
        print(name_new)

        if len(name_new) > 1:
            fname_new = cm.save_memmap_join(name_new,
                                            base_name='Yr',
                                            n_chunks=12,
                                            dview=dview)
        else:
            sys.stdout.write('One file only, not saving\n')
            fname_new = name_new[0]

        print("fname_new: " + fname_new)

        Yr, dims, T = cm.load_memmap(fname_new)
        Y = np.reshape(Yr, dims + (T, ), order='F')
        Cn = cm.local_correlations(Y)

        K = nrois_init  # number of neurons expected per patch
        gSig = [15, 15]  # expected half size of neurons
        merge_thresh = 0.8  # merging threshold, max correlation allowed
        p = 2  # order of the autoregressive system
        options = caiman_cnmf.utilities.CNMFSetParms(Y,
                                                     NCPUS,
                                                     p=p,
                                                     gSig=gSig,
                                                     K=K,
                                                     ssub=2,
                                                     tsub=2)

        Yr, sn, g, psx = caiman_cnmf.pre_processing.preprocess_data(
            Yr, dview=dview, **options['preprocess_params'])
        Atmp, Ctmp, b_in, f_in, center = caiman_cnmf.initialization.initialize_components(
            Y, **options['init_params'])

        Ain, Cin = Atmp, Ctmp
        A, b, Cin, f_in = caiman_cnmf.spatial.update_spatial_components(
            Yr,
            Cin,
            f_in,
            Ain,
            sn=sn,
            dview=dview,
            **options['spatial_params'])

        options['temporal_params'][
            'p'] = 0  # set this to zero for fast updating without deconvolution
        C, A, b, f, S, bl, c1, neurons_sn, g, YrA = caiman_cnmf.temporal.update_temporal_components(
            Yr,
            A,
            b,
            Cin,
            f_in,
            bl=None,
            c1=None,
            sn=None,
            g=None,
            **options['temporal_params'])

        A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = caiman_cnmf.merging.merge_components(
            Yr,
            A,
            b,
            C,
            f,
            S,
            sn,
            options['temporal_params'],
            options['spatial_params'],
            dview=dview,
            bl=bl,
            c1=c1,
            sn=neurons_sn,
            g=g,
            thr=merge_thresh,
            mx=50,
            fast_merge=True)

        A2, b2, C2, f = caiman_cnmf.spatial.update_spatial_components(
            Yr, C_m, f, A_m, sn=sn, dview=dview, **options['spatial_params'])
        options['temporal_params'][
            'p'] = p  # set it back to original value to perform full deconvolution
        C2, A2, b2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = caiman_cnmf.temporal.update_temporal_components(
            Yr,
            A2,
            b2,
            C2,
            f,
            dview=dview,
            bl=None,
            c1=None,
            sn=None,
            g=None,
            **options['temporal_params'])

        tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
        tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
        Npeaks = 10
        traces = C2 + YrA
        fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
            evaluate_components(
                Y, traces, A2, C2, b2, f2, final_frate, remove_baseline=True, N=5,
                robust_std=False, Athresh=0.1, Npeaks=Npeaks, thresh_C=0.3)

        idx_components_r = np.where(r_values >= .6)[0]
        idx_components_raw = np.where(fitness_raw < -60)[0]
        idx_components_delta = np.where(fitness_delta < -20)[0]

        min_radius = gSig[0] - 2
        masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
            A2.tocsc(),
            min_radius,
            dims,
            num_std_threshold=1,
            minCircularity=0.6,
            minInertiaRatio=0.2,
            minConvexity=.8)

        idx_components = np.union1d(idx_components_r, idx_components_raw)
        idx_components = np.union1d(idx_components, idx_components_delta)
        idx_blobs = np.intersect1d(idx_components, idx_blobs)
        idx_components_bad = np.setdiff1d(range(len(traces)), idx_components)

        A2 = A2.tocsc()[:, idx_components]
        C2 = C2[idx_components, :]
        YrA = YrA[idx_components, :]
        S2 = S2[idx_components, :]

        # A: spatial components (ROIs)
        # C: denoised [Ca2+]
        # YrA: residuals ("noise", i.e. traces = C+YrA)
        # S: Spikes
        savemat(fn_cnmf, {"A": A2, "C": C2, "YrA": YrA, "S": S2, "bl": bl2})

    else:
        resdict = loadmat(fn_cnmf)
        A2 = resdict["A"]
        C2 = resdict["C"]
        YrA = resdict["YrA"]
        S2 = resdict["S"]
        bl2 = resdict["bl"]
        Yr, dims, T = cm.load_memmap(fn_mmap)
        dims = dims[1:]
        Y = np.reshape(Yr, dims + (T, ), order='F')

    proj_fn = haussio_data.dirname_comp + "_proj.npy"
    if not os.path.exists(proj_fn):
        zproj = utils.zproject(np.transpose(Y, (2, 0, 1)))
        np.save(proj_fn, zproj)
    else:
        zproj = np.load(proj_fn)

    # DF_F, DF = cse.extract_DF_F(Y.reshape(d1*d2, T), A2, C2)

    # t0 = time.time()
    # sys.stdout.write("Ordering components... ")
    # sys.stdout.flush()
    # A_or, C_or, srt = cse.order_components(A2, C2)
    # sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))

    cm.stop_server()

    polygons = contour(A2, Y.shape[0], Y.shape[1], thr=roi_iceberg)
    rois = ROIList([sima.ROI.ROI(polygons=poly) for poly in polygons])

    return rois, C2, zproj, S2, Y, YrA
Code example #14
    dview = c[:len(c)]
#%%
# idx_x=slice(12,500,None)
# idx_y=slice(12,500,None)
# idx_xy=(idx_x,idx_y)
add_to_movie = 100  # the movie must be positive!!!
border_to_0 = 0
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
#%%
#name_new=cm.save_memmap_each(['M_FLUO_t.tif'], dview=dview,base_name=base_name, resize_fact=(1, 1, downsample_factor), remove_init=0,idx_xy=idx_xy,add_to_movie=add_to_movie,border_to_0=border_to_0)
name_new = cm.save_memmap_each(['M_FLUO_t.tif'],
                               dview=dview,
                               base_name=base_name,
                               resize_fact=(1, 1, downsample_factor),
                               remove_init=0,
                               idx_xy=idx_xy,
                               add_to_movie=add_to_movie,
                               border_to_0=border_to_0)
name_new.sort()
print(name_new)
#%%
# Yr,dim,T=cm.load_memmap('Yr0_d1_64_d2_128_d3_1_order_C_frames_6764_.mmap')
Yr, dim, T = cm.load_memmap('Yr0_d1_512_d2_512_d3_1_order_C_frames_1076_.mmap')

res, idfl, shape_grid = apply_to_patch(Yr, (T, ) + dim,
                                       None,
                                       dim[0],
                                       8,
                                       motion_correct_online,
                                       200,
Code example #15
def run():

    data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \
                  r"\180323-M360495-deepscope\02\02_"
    base_name = '180323_M360495_02'
    t_downsample_rate = 10.

    plane_ns = [
        p for p in os.listdir(data_folder)
        if os.path.isdir(os.path.join(data_folder, p))
    ]
    plane_ns.sort()
    print('planes:')
    print('\n'.join(plane_ns))

    ## start cluster
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=3,
                                                     single_thread=False)

    for plane_n in plane_ns:
        print('\nprocessing {} ...'.format(plane_n))

        plane_folder = os.path.join(data_folder, plane_n, 'corrected')
        os.chdir(plane_folder)

        f_ns = [
            f for f in os.listdir(plane_folder) if f[-14:] == '_corrected.tif'
        ]
        f_ns.sort()
        print('\n'.join(f_ns))

        min_tot = 0
        for fn in f_ns:
            min_tot = min(
                [min_tot,
                 np.min(tf.imread(os.path.join(plane_folder, fn)))])
        print('minimum pixel value of entire movie: ' + str(min_tot))

        add_to_movie = 10. - min_tot  # the movie must be positive!!!
        t_ds_factor = 1. / t_downsample_rate  # use .2 or .1 if file is large and you want a quick answer
        f_paths = [os.path.join(plane_folder, f) for f in f_ns]

        name_new = cm.save_memmap_each(f_paths,
                                       dview=dview,
                                       base_name=base_name + '_' + plane_n +
                                       '_each',
                                       resize_fact=(1., 1., t_ds_factor),
                                       add_to_movie=add_to_movie)
        name_new.sort()

        fname_new = cm.save_memmap_join(name_new,
                                        base_name=base_name + '_' + plane_n,
                                        dview=dview,
                                        n_chunks=100)
        print('\n{}'.format(fname_new))

        save_file = h5py.File(
            os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'))
        save_file['bias_added_to_movie'] = add_to_movie
        save_file.close()

        single_fns = [f for f in os.listdir(plane_folder) if '_each' in f]
        for single_fn in single_fns:
            os.remove(os.path.join(plane_folder, single_fn))
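
        # Sketch of reading the stored bias back from the results file
        # written above (path and key taken from the code; illustrative):
        with h5py.File(os.path.join(plane_folder,
                                    'caiman_segmentation_results.hdf5'),
                       'r') as f:
            print(f['bias_added_to_movie'][()])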
Code example #16
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% start a cluster

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

    #%% save files to be processed

    # This datafile is distributed with Caiman
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]
    # location of dataset  (can actually be a list of files to be concatenated)
    add_to_movie = -np.min(cm.load(fnames[0],
                                   subindices=range(200))).astype(float)
    # determine minimum value on a small chunk of data
    add_to_movie = np.maximum(add_to_movie, 0)
    # if the minimum is negative, this offset makes the data non-negative
    base_name = 'Yr'
    name_new = cm.save_memmap_each(fnames,
                                   dview=dview,
                                   base_name=base_name,
                                   add_to_movie=add_to_movie)
    name_new.sort()
    fname_new = cm.save_memmap_join(name_new, base_name='Yr', dview=dview)
    #%% LOAD MEMORY MAPPABLE FILE
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    #%% play movie, press q to quit
    play_movie = False
    if play_movie:
        cm.movie(images[1400:]).play(fr=50, magnification=4, gain=3.)

    #%% correlation image. From here infer neuron size and density
    Cn = cm.movie(images).local_correlations(swap_dim=False)
    plt.imshow(Cn, cmap='gray')
    plt.title('Correlation Image')

    #%% set up some parameters

    is_patches = True  # flag for processing in patches or not

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 4  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = 30  # number of neurons expected (in the whole FOV)

    gSig = [6, 6]  # expected half size of neurons
    merge_thresh = 0.80  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    #%% Now RUN CNMF
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    gnb=gnb,
                    rf=rf,
                    stride=stride,
                    rolling_sum=False)
    cnm = cnm.fit(images)

    #%% plot contour plots of components

    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
    plt.title('Contour plots of components')

    #%%
    A_in, C_in, b_in, f_in = cnm.A[:, :], cnm.C[:], cnm.b, cnm.f
    cnm2 = cnmf.CNMF(n_processes=1,
                     k=A_in.shape[-1],
                     gSig=gSig,
                     p=p,
                     dview=dview,
                     merge_thresh=merge_thresh,
                     Ain=A_in,
                     Cin=C_in,
                     b_in=b_in,
                     f_in=f_in,
                     rf=None,
                     stride=None,
                     gnb=gnb,
                     method_deconvolution='oasis',
                     check_nan=True)

    cnm2 = cnm2.fit(images)
    #%% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)
    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient
    min_SNR = 2.5  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.90  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.95  # if cnn classifier predicts below this value, reject

    # note: quality is estimated on the first fit (cnm) although the refined
    # model cnm2 is visualized below; evaluating cnm2 may have been intended
    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                         cnm.YrA, fr, decay_time, gSig, dims,
                                         dview=dview, min_SNR=min_SNR,
                                         r_values_min=rval_thr, use_cnn=use_cnn,
                                         thresh_cnn_min=min_cnn_thr)
    #%% visualize selected and rejected components
    plt.figure()
    plt.subplot(1, 2, 1)
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components],
                                         Cn,
                                         thr=0.9)
    plt.title('Selected components')
    plt.subplot(1, 2, 2)
    plt.title('Discarded components')
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components_bad],
                                         Cn,
                                         thr=0.9)

    #%%
    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm2.A.tocsc()[:,
                                                              idx_components],
                                               Cn,
                                               thr=0.9)
    plt.title('Contour plots of components')
    #%% visualize selected components
    cm.utils.visualization.view_patches_bar(Yr,
                                            cnm2.A.tocsc()[:, idx_components],
                                            cnm2.C[idx_components, :],
                                            cnm2.b,
                                            cnm2.f,
                                            dims[0],
                                            dims[1],
                                            YrA=cnm2.YrA[idx_components, :],
                                            img=Cn)
    #%% STOP CLUSTER and clean up log files
    cm.stop_server()

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
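
#%% run the demo
# The CaImAn demos typically invoke main() at the bottom of the file; a
# sketch of the usual entry point:
if __name__ == '__main__':
    main()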
Code example #17
        c = Client()

    print(('Using ' + str(len(c)) + ' processes'))
    dview = c[:len(c)]
#%%
# idx_x=slice(12,500,None)
# idx_y=slice(12,500,None)
# idx_xy=(idx_x,idx_y)
add_to_movie = 100  # the movie must be positive!!!
border_to_0 = 0
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
#%%
#name_new=cm.save_memmap_each(['M_FLUO_t.tif'], dview=dview,base_name=base_name, resize_fact=(1, 1, downsample_factor), remove_init=0,idx_xy=idx_xy,add_to_movie=add_to_movie,border_to_0=border_to_0)
name_new = cm.save_memmap_each(['M_FLUO_t.tif'], dview=dview, base_name=base_name, resize_fact=(
    1, 1, downsample_factor), remove_init=0, idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0)
name_new.sort()
print(name_new)
#%%
# Yr,dim,T=cm.load_memmap('Yr0_d1_64_d2_128_d3_1_order_C_frames_6764_.mmap')
Yr, dim, T = cm.load_memmap('Yr0_d1_512_d2_512_d3_1_order_C_frames_1076_.mmap')

res, idfl, shape_grid = apply_to_patch(Yr, (T,) + dim, None, dim[0], 8, motion_correct_online, 200, max_shift_w=15, max_shift_h=15,
                                       save_base_name='test_mmap',  init_frames_template=100, show_movie=False, remove_blanks=True, n_iter=2, show_template=False)
#%%
[pl.plot(np.array(r[0][-2])) for r in res]
[pl.plot(np.array(r[0][-1])) for r in res]

#%%
[pl.plot(np.array(r[1][-2])) for r in res]
[pl.plot(np.array(r[1][-1])) for r in res]
Code example #18
import tifffile
fnames = [sys.argv[1]]  # name of the movie
fnames_orig = fnames
final_frate = int(os.environ["FPS"])  # frame rate in Hz
K = int(os.environ["COMP"])  # number of neurons expected per patch, that seems to work well
n_processes = 5  # if using the intel nodes
single_thread = True
dview = None
gSig = [3, 3]  # expected half size of neurons, works for nuclear GCaMP
merge_thresh = 0.9  # merging threshold, max correlation allowed
p = 2  # order of the autoregressive system
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
final_frate = final_frate * downsample_factor
idx_xy = None
base_name = os.environ["TMPDIR"]
fname_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name,
                                resize_fact=(0.5, 0.5, downsample_factor),
                                remove_init=0, idx_xy=idx_xy)
fname_new = cm.save_memmap_join(fname_new, base_name=base_name + 'Yr',
                                n_chunks=n_processes, dview=dview)
Yr, dims, T = cm.load_memmap(fname_new)
Y = np.reshape(Yr, dims + (T,), order='F')
tifffile.imsave(fnames[0][:-4] + '_mean.tif', np.mean(Y, axis=2))
tifffile.imsave(base_name + 'temp.tif', Y.swapaxes(0, 2).swapaxes(1, 2))
fnames = [base_name + 'temp.tif']
fname_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name,
                                resize_fact=(0.5, 0.5, downsample_factor),
                                remove_init=0, idx_xy=idx_xy)
fname_new = cm.save_memmap_join(fname_new, base_name=base_name + 'Yr',
                                n_chunks=n_processes, dview=dview)
nb_back = 1
options = cnmf.utilities.CNMFSetParms(Y, n_processes, p=p, gSig=gSig, K=K,
                                      ssub=2, tsub=10, nb=nb_back)
# options['preprocess_params']['max_num_samples_fft'] = 10000
Cn = cm.local_correlations(Y)
Yr, sn, g, psx = cnmf.pre_processing.preprocess_data(
    Yr, dview=dview, **options['preprocess_params'])
Ain, Cin, b_in, f_in, center = cnmf.initialization.initialize_components(
    Y, **options['init_params'])
Ain, b_in, Cin, f_in = cnmf.spatial.update_spatial_components(
    Yr, Cin, f_in, Ain, sn=sn, dview=dview, **options['spatial_params'])
Code example #19
File: mmapping.py  Project: levskaya/CaImAn-1
    """
    if type(filenames) is not list:
        raise Exception('input should be a list of filenames')
        
    if len(filenames) > 1:
        is_inconsistent_order = False
        for file__ in filenames:
            if 'order_' + order not in file__:
                is_inconsistent_order = True

        if is_inconsistent_order:
            fname_new = cm.save_memmap_each(filenames,
                                        base_name=base_name,
                                        order=order,
                                        border_to_0=border_to_0,
                                        dview=dview,
                                        resize_fact=resize_fact,
                                        remove_init=remove_init,
                                        idx_xy=idx_xy,
                                        xy_shifts=xy_shifts,
                                        add_to_movie=add_to_movie)
        else:
            # not in the original excerpt; without this branch fname_new is
            # undefined below (compare the parallel implementation in code
            # example #20)
            fname_new = filenames

        # note: 'async' became a reserved word in Python 3.7, so this call
        # only parses on older interpreters; newer CaImAn versions dropped
        # the parameter
        fname_new = cm.save_memmap_join(fname_new, base_name=base_name,
                                        dview=dview, n_chunks=n_chunks,
                                        async=async)

    else:
    # TODO: can be done online
        Ttot = 0
        for idx, f in enumerate(filenames):
            if isinstance(f, str):
Code example #20
File: mmapping.py  Project: Peichao/Constrained_NMF
def save_memmap(filenames, base_name='Yr', resize_fact=(1, 1, 1), remove_init=0, idx_xy=None,
                order='F', xy_shifts=None, is_3D=False, add_to_movie=0, border_to_0=0, dview = None,
                n_chunks=100):

    """ Efficiently write data from a list of tif files into a memory mappable file

    Parameters:
    ----------
        filenames: list
            list of tif files or list of numpy arrays

        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"

        resize_fact: tuple
            x,y, and z downsampling factors (0.5 means downsampled by a factor 2)

        remove_init: int
            number of frames to remove at the beginning of each tif file
            (used for resonant scanning images if the laser is turned on trial by trial)

        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance
            idx_xy = (slice(150,350,None), slice(150,350,None))

        order: string
            whether to save the file in 'C' or 'F' order

        xy_shifts: list
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping

        is_3D: boolean
            whether it is 3D data
        add_to_movie: floating-point
            value to add to each image point, typically to keep negative values out.
        border_to_0: (undocumented)
        dview:       (undocumented)
        n_chunks:    (undocumented)
    Returns:
    -------
        fname_new: the name of the mapped file, the format is such that
            the name will contain the frame dimensions and the number of frames

    """
    if type(filenames) is not list:
        raise Exception('input should be a list of filenames')

    if len(filenames) > 1:
        is_inconsistent_order = False
        for file__ in filenames:
            if ('order_' + order not in file__) or ('.mmap' not in file__):
                is_inconsistent_order = True


        if is_inconsistent_order: # Here we make a bunch of memmap files in the right order. Same parameters
            fname_new = cm.save_memmap_each(filenames,
                                        base_name    = base_name,
                                        order        = order,
                                        border_to_0  = border_to_0,
                                        dview        = dview,
                                        resize_fact  = resize_fact,
                                        remove_init  = remove_init,
                                        idx_xy       = idx_xy,
                                        xy_shifts    = xy_shifts,
                                        add_to_movie = add_to_movie)
        else:
            fname_new = filenames

        # The goal is to make a single large memmap file, which we do here
        if order == 'F':
            raise Exception('You cannot merge files in F order, they must be in C order')


        fname_new = cm.save_memmap_join(fname_new, base_name=base_name, dview=dview,
                                        n_chunks=n_chunks, add_to_mov=add_to_movie)

    else:
    # TODO: can be done online
        Ttot = 0
        for idx, f in enumerate(filenames):
            if isinstance(f, str): # Might not always be filenames.
                print(f)

            if is_3D:
                # basestring comes from past.builtins in the original module (str on Python 3)
                Yr = f if not isinstance(f, basestring) else tifffile.imread(f)
                if idx_xy is None:
                    Yr = Yr[remove_init:]
                elif len(idx_xy) == 2:
                    Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                else:
                    Yr = Yr[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

            else:
                # basestring as above; cm.load accepts both file names and lists of them
                Yr = cm.load(f, fr=1, in_memory=True) if (isinstance(f, basestring)
                                                          or isinstance(f, list)) else cm.movie(f)
                if xy_shifts is not None:
                    Yr = Yr.apply_shifts(xy_shifts, interpolation='cubic', remove_blanks=False)
                if idx_xy is None:
                    if remove_init > 0:
                        Yr = Yr[remove_init:]
                elif len(idx_xy) == 2:
                    Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                else:
                    raise Exception('You need to set is_3D=True for 3D data')
                    # unreachable in the original; kept for reference:
                    # Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

            if border_to_0 > 0:
                min_mov = Yr.calc_min()
                Yr[:, :border_to_0, :] = min_mov
                Yr[:, :, :border_to_0] = min_mov
                Yr[:, :, -border_to_0:] = min_mov
                Yr[:, -border_to_0:, :] = min_mov

            fx, fy, fz = resize_fact
            if fx != 1 or fy != 1 or fz != 1:
                if 'movie' not in str(type(Yr)):
                    Yr = cm.movie(Yr, fr=1)
                Yr = Yr.resize(fx=fx, fy=fy, fz=fz)

            T, dims = Yr.shape[0], Yr.shape[1:]
            Yr = np.transpose(Yr, list(range(1, len(dims) + 1)) + [0])
            Yr = np.reshape(Yr, (np.prod(dims), T), order='F')
            Yr = np.ascontiguousarray(Yr, dtype=np.float32) + 0.0001 + add_to_movie

            if idx == 0:
                fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(
                    1 if len(dims) == 2 else dims[2]) + '_order_' + str(order) # TODO: Rewrite more legibly
                if isinstance(f, str):
                    fname_tot = os.path.join(os.path.split(f)[0], fname_tot)
                if len(filenames) > 1:  # always False in this branch; kept from the original
                    big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
                                    shape=prepare_shape((np.prod(dims), T)), order=order)
                    big_mov[:, Ttot:Ttot + T] = Yr
                    del big_mov
                else:
                    print('SAVING WITH numpy.tofile()')
                    Yr.tofile(fname_tot)
            else:
                big_mov = np.memmap(fname_tot, dtype=np.float32, mode='r+',
                                    shape=prepare_shape((np.prod(dims), Ttot + T)), order=order)

                big_mov[:, Ttot:Ttot + T] = Yr
                del big_mov

            sys.stdout.flush()
            Ttot = Ttot + T

        fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
        try:
            # need to explicitly remove destination on windows
            os.unlink(fname_new)
        except OSError:
            pass
        os.rename(fname_tot, fname_new)

    return fname_new
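
A minimal usage sketch for the save_memmap defined above, assuming a single hypothetical tif file called 'movie1.tif'; the add_to_movie offset follows the docstring's advice to keep the saved data non-negative:

import caiman as cm

fnames = ['movie1.tif']  # placeholder file name
# estimate an offset on a short chunk so the saved data stays non-negative
add_to_movie = float(-cm.load(fnames[0], subindices=range(100)).min())
fname_new = save_memmap(fnames, base_name='Yr', order='C',
                        add_to_movie=max(add_to_movie, 0.0))
Yr, dims, T = cm.load_memmap(fname_new)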
Code example #21
add_to_movie = -np.min(adds_to_movie)
print(adds_to_movie)
print(add_to_movie)
#%%
# add_to_movie = -np.nanmin(templates_rig) + 1  # the movie must be positive!!!
t1 = time.time()
n_processes_mmap = 14  # lower this number if you have memory problems!
dview_sub = c[:n_processes_mmap]
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames_map,
                               dview=dview_sub,
                               base_name=base_name,
                               resize_fact=(1, 1, downsample_factor),
                               remove_init=0,
                               idx_xy=idx_xy,
                               add_to_movie=add_to_movie,
                               border_to_0=border_to_0)
name_new.sort()
print(name_new)
t_mmap_1 = time.time() - t1
#%%
t1 = time.time()
if len(name_new) > 1:
    fname_new = cm.save_memmap_join(name_new,
                                    base_name='Yr',
                                    n_chunks=56,
                                    dview=dview)
else:
    print('One file only, not saving!')
    fname_new = name_new[0]  # single-file handling, mirroring the other examples in this collection
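
For reference, the dview_sub trick above relies on setup_cluster returning the ipyparallel client c; slicing c yields a view over a subset of engines. A hedged sketch (whether c is an ipyparallel client depends on the backend and the CaImAn version):

import caiman as cm

c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
# restrict the memory-hungry mapping step to a few engines
n_processes_mmap = min(4, n_processes)  # illustrative cap
dview_sub = c[:n_processes_mmap]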
Code example #22
File: CNMF.py Project: gitter-badger/MESmerize
def run(batch_dir: str, UUID: str):

    output = {'status': 0, 'output_info': ''}
    n_processes = os.environ['_MESMERIZE_N_THREADS']
    n_processes = int(n_processes)
    file_path = batch_dir + '/' + UUID

    filename = [file_path + '.tiff']
    input_params = pickle.load(open(file_path + '.params', 'rb'))

    fr = input_params['fr']
    p = input_params['p']
    gnb = input_params['gnb']
    merge_thresh = input_params['merge_thresh']
    rf = input_params['rf']
    stride_cnmf = input_params['stride_cnmf']
    K = input_params['k']
    gSig = input_params['gSig']
    gSig = [gSig, gSig]
    min_SNR = input_params['min_SNR']
    rval_thr = input_params['rval_thr']
    cnn_thr = input_params['cnn_thr']
    decay_time = input_params['decay_time']
    bord_px = input_params['bord_px']
    refit = input_params['refit']

    print('*********** Creating Process Pool ***********')
    # the original bound the third return value to `np`, shadowing numpy
    c, dview, n_procs = cm.cluster.setup_cluster(backend='local', n_processes=n_processes,
                                                 single_thread=False)

    try:

        print('Creating memmap')
        fname_new = cm.save_memmap_each(
            filename,
            base_name='memmap_' + UUID,
            order='C',
            border_to_0=bord_px,
            dview=dview)
        fname_new = cm.save_memmap_join(fname_new, base_name='memmap_' + UUID, dview=dview)

        Yr, dims, T = cm.load_memmap(fname_new)
        Y = Yr.T.reshape((T,) + dims, order='F')

        cnm = cnmf.CNMF(n_processes=n_processes, k=K, gSig=gSig, merge_thresh=merge_thresh,
                        p=0, dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                        method_init='greedy_roi', alpha_snmf=None,
                        only_init_patch=False, gnb=gnb, border_pix=bord_px)
        cnm.fit(Y)

        idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
            estimate_components_quality_auto(Y, cnm.A, cnm.C, cnm.b, cnm.f,
                                             cnm.YrA, fr, decay_time, gSig, dims,
                                             dview=dview, min_SNR=min_SNR,
                                             r_values_min=rval_thr, use_cnn=False,
                                             thresh_cnn_lowest=cnn_thr)

        if refit:

            A_in, C_in, b_in, f_in = (cnm.A[:, idx_components],
                                      cnm.C[idx_components], cnm.b, cnm.f)

            cnm2 = cnmf.CNMF(n_processes=n_processes, k=A_in.shape[-1], gSig=gSig, p=p, dview=dview,
                             merge_thresh=merge_thresh, Ain=A_in, Cin=C_in, b_in=b_in,
                             f_in=f_in, rf=None, stride=None, gnb=gnb,
                             method_deconvolution='oasis', check_nan=True)

            cnm2 = cnm2.fit(Y)

            cnmA = cnm2.A
            cnmb = cnm2.b
            cnmC = cnm2.C
            cnm_f = cnm2.f
            cnmYrA = cnm2.YrA
        else:
            cnmA = cnm.A
            cnmb = cnm.b
            cnmC = cnm.C
            cnm_f = cnm.f
            cnmYrA = cnm.YrA

        pickle.dump(Yr, open(UUID + '_Yr.pikl', 'wb'), protocol=4)
        pickle.dump(cnmA, open(UUID + '_cnm-A.pikl', 'wb'), protocol=4)
        pickle.dump(cnmb, open(UUID + '_cnm-b.pikl', 'wb'), protocol=4)
        pickle.dump(cnmC, open(UUID + '_cnm-C.pikl', 'wb'), protocol=4)
        pickle.dump(cnm_f, open(UUID + '_cnm-f.pikl', 'wb'), protocol=4)
        pickle.dump(idx_components, open(UUID + '_idx_components.pikl', 'wb'), protocol=4)
        pickle.dump(cnmYrA, open(UUID + '_cnm-YrA.pikl', 'wb'), protocol=4)
        pickle.dump(dims, open(UUID + '_dims.pikl', 'wb'), protocol=4)

        output_file_list = [UUID + '_cnm-A.pikl',
                            UUID + '_Yr.pikl',
                            UUID + '_cnm-b.pikl',
                            UUID + '_cnm-C.pikl',
                            UUID + '_cnm-f.pikl',
                            UUID + '_idx_components.pikl',
                            UUID + '_cnm-YrA.pikl',
                            UUID + '_dims.pikl',
                            UUID + '.out'
                            ]

        output.update({'output': UUID,
                       'status': 1,
                       'output_files': output_file_list
                       })

    except Exception:
        output.update({'status': 0, 'output_info': traceback.format_exc()})

    dview.terminate()

    for mf in glob(batch_dir + '/memmap_*'):
        os.remove(mf)

    json.dump(output, open(file_path + '.out', 'w'))
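
To call run() above, the batch directory needs a <UUID>.params pickle and a <UUID>.tiff movie. A hedged sketch of writing such a params file; the key names mirror what run() reads, while the directory, UUID and all values are placeholders:

import os
import pickle

batch_dir = '/tmp/mesmerize_batch'  # hypothetical batch directory
UUID = 'example-uuid'               # hypothetical item UUID
os.makedirs(batch_dir, exist_ok=True)

input_params = {'fr': 10, 'p': 1, 'gnb': 2, 'merge_thresh': 0.8,
                'rf': 40, 'stride_cnmf': 20, 'k': 10, 'gSig': 4,
                'min_SNR': 2.5, 'rval_thr': 0.8, 'cnn_thr': 0.8,
                'decay_time': 0.4, 'bord_px': 0, 'refit': False}
with open(batch_dir + '/' + UUID + '.params', 'wb') as f:
    pickle.dump(input_params, f)

os.environ['_MESMERIZE_N_THREADS'] = '4'
# run(batch_dir, UUID)  # also expects batch_dir + '/' + UUID + '.tiff' to exist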
Code example #23
        fnames.append(os.path.abspath(file))
fnames.sort()
if len(fnames) == 0:
    raise Exception("Could not find any tiff file")
print(fnames)
fnames = fnames
#%%
#idx_x=slice(12,500,None)
#idx_y=slice(12,500,None)
#idx_xy=(idx_x,idx_y)
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames,
                               dview=dview,
                               base_name=base_name,
                               resize_fact=(1, 1, downsample_factor),
                               remove_init=0,
                               idx_xy=idx_xy)
name_new.sort()
print(name_new)
#%%
# note: this second call recomputes the per-file memmaps and overwrites name_new
name_new = cm.save_memmap_each(fnames,
                               dview=dview,
                               base_name='Yr',
                               resize_fact=(1, 1, 1),
                               remove_init=0,
                               idx_xy=None)
name_new.sort()
#%%
fname_new = cm.save_memmap_join(name_new,
                                base_name='Yr',
Code example #24
def main():
    pass # For compatibility between running under Spyder and the CLI

#%% First setup some parameters

    # dataset dependent parameters
    display_images = False           # Set to true to show movies and images
    fnames = ['data_endoscope.tif']  # filename to be processed
    frate = 10                       # movie frame rate
    decay_time = 0.4                 # length of a typical transient in seconds
    
    # motion correction parameters
    do_motion_correction_nonrigid = True
    do_motion_correction_rigid = False  # in this case it will also save a rigid motion corrected movie
    gSig_filt = (3, 3)   # size of filter, in general gSig (see below),
    #                      change this one if algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    splits_rig = 10      # for parallelization split the movies in num_splits chunks across time
    strides = (48, 48)   # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (patch size is strides + overlaps)
    # for parallelization split the movies in num_splits chunks across time
    # (remember that it should hold that length_movie/num_splits_to_process_rig > 100)
    splits_els = 10
    upsample_factor_grid = 4    # upsample factor to avoid smearing when merging patches
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3
    
    # parameters for source extraction and deconvolution
    p = 1               # order of the autoregressive system
    K = None            # upper bound on number of components per patch, in general None
    gSig = 3            # gaussian width of a 2D gaussian kernel, which approximates a neuron
    gSiz = 13           # average diameter of a neuron, in general 4*gSig+1
    merge_thresh = .7   # merging threshold, max correlation allowed
    rf = 40             # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
    stride_cnmf = 20    # amount of overlap between the patches in pixels
    #                     (keep it at least as large as gSiz, i.e. 4 times the neuron size gSig)
    tsub = 2            # downsampling factor in time for initialization,
    #                     increase if you have memory problems
    ssub = 1            # downsampling factor in space for initialization,
    #                     increase if you have memory problems
    Ain = None          # if you want to initialize with some preselected components
    #                     you can pass them here as boolean vectors
    low_rank_background = None  # None leaves background of each patch intact,
    #                             True performs global low-rank approximation 
    gnb = -1            # number of background components (rank) if positive,
    #                     else exact ring model with following settings
    #                         gnb=-2: Return background as b and W
    #                         gnb=-1: Return full rank background B
    #                         gnb= 0: Don't return background
    nb_patch = -1       # number of background components (rank) per patch,
    #                     use 0 or -1 for exact background of ring model (cf. gnb)
    min_corr = .8       # min peak value from correlation image
    min_pnr = 10        # min peak-to-noise ratio from PNR image
    ssub_B = 2          # additional downsampling factor in space for background
    ring_size_factor = 1.4  # radius of ring is gSiz*ring_size_factor
    
    # parameters for component evaluation
    min_SNR = 3            # adaptive way to set threshold on the transient size
    r_values_min = 0.85    # threshold on space consistency (if you lower it, more components
    #                        will be accepted, potentially with worse quality)

#%% start the cluster
    try:
        cm.stop_server(dview=dview)  # stop it if it was running
    except:
        pass
    
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',  # use this one
                                                     n_processes=24,  # number of process to use, if you go out of memory try to reduce this one
                                                     single_thread=False)

#%% download demo file
    fnames = [download_demo(fnames[0])]
    filename_reorder = fnames

#%% MOTION CORRECTION
    if do_motion_correction_nonrigid or do_motion_correction_rigid:
        # do motion correction rigid
        mc = motion_correct_oneP_rigid(fnames,
                                       gSig_filt=gSig_filt,
                                       max_shifts=max_shifts,
                                       dview=dview,
                                       splits_rig=splits_rig,
                                       save_movie=not(do_motion_correction_nonrigid)
                                       )
    
        new_templ = mc.total_template_rig
    
        plt.subplot(1, 2, 1)
        plt.imshow(new_templ)  # % plot template
        plt.subplot(1, 2, 2)
        plt.plot(mc.shifts_rig)  # % plot rigid shifts
        plt.legend(['x shifts', 'y shifts'])
        plt.xlabel('frames')
        plt.ylabel('pixels')
    
        # borders to eliminate from movie because of motion correction
        bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)  # int, not the removed np.int
        filename_reorder = mc.fname_tot_rig
    
        # do motion correction nonrigid
        if do_motion_correction_nonrigid:
            mc = motion_correct_oneP_nonrigid(
                fnames,
                gSig_filt=gSig_filt,
                max_shifts=max_shifts,
                strides=strides,
                overlaps=overlaps,
                splits_els=splits_els,
                upsample_factor_grid=upsample_factor_grid,
                max_deviation_rigid=max_deviation_rigid,
                dview=dview,
                splits_rig=None,
                save_movie=True,  # whether to save movie in memory mapped format
                new_templ=new_templ  # template to initialize motion correction
            )
    
            filename_reorder = mc.fname_tot_els
            bord_px = np.ceil(
                np.maximum(np.max(np.abs(mc.x_shifts_els)),
                           np.max(np.abs(mc.y_shifts_els)))).astype(int)  # int, not the removed np.int
    
    # create memory mappable file in the right order on the hard drive (C order)
    fname_new = cm.save_memmap_each(
        filename_reorder,
        base_name='memmap_',
        order='C',
        border_to_0=bord_px,
        dview=dview)
    fname_new = cm.save_memmap_join(fname_new, base_name='memmap_', dview=dview)
    
    
    # load memory mappable file
    Yr, dims, T = cm.load_memmap(fname_new)
    Y = Yr.T.reshape((T,) + dims, order='F')
#%% compute some summary images (correlation and peak to noise)
    # change swap_dim if output looks weird; it is a problem with tifffile
    cn_filter, pnr = cm.summary_images.correlation_pnr(Y, gSig=gSig, swap_dim=False)
    # inspect the summary images and set the parameters
    inspect_correlation_pnr(cn_filter, pnr)
    # print parameters set above, modify them if necessary based on summary images
    print(min_corr) # min correlation of peak (from correlation image)
    print(min_pnr)  # min peak to noise ratio

#%% RUN CNMF ON PATCHES
    cnm = cnmf.CNMF(
        n_processes=n_processes,
        method_init='corr_pnr',             # use this for 1 photon
        k=K,
        gSig=(gSig, gSig),
        gSiz=(gSiz, gSiz),
        merge_thresh=merge_thresh,
        p=p,
        dview=dview,
        tsub=tsub,
        ssub=ssub,
        Ain=Ain,
        rf=rf,
        stride=stride_cnmf,
        only_init_patch=True,               # just leave it as is
        gnb=gnb,
        nb_patch=nb_patch,
        method_deconvolution='oasis',       # could use 'cvxpy' alternatively
        low_rank_background=low_rank_background,
        update_background_components=True,  # sometimes setting this to False improves the results
        min_corr=min_corr,
        min_pnr=min_pnr,
        normalize_init=False,               # just leave as is
        center_psf=True,                    # leave as is for 1 photon
        ssub_B=ssub_B,
        ring_size_factor=ring_size_factor,
        del_duplicates=True,                # whether to remove duplicates from initialization
        border_pix=bord_px)                 # number of pixels to not consider in the borders
    cnm.fit(Y)


#%% DISCARD LOW QUALITY COMPONENTS
    idx_components, idx_components_bad, comp_SNR, r_values, pred_CNN = \
        estimate_components_quality_auto(
            Y, cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, frate,
            decay_time, gSig, dims, dview=dview,
            min_SNR=min_SNR, r_values_min=r_values_min, use_cnn=False)
    
    print(' ***** ')
    print((len(cnm.C)))
    print((len(idx_components)))
    
    cm.stop_server(dview=dview)

#%% PLOT COMPONENTS
    if display_images:
        plt.figure(figsize=(12, 6))
        plt.subplot(121)
        crd_good = cm.utils.visualization.plot_contours(
            cnm.A[:, idx_components], cn_filter, thr=.8, vmax=0.95)
        plt.title('Contour plots of accepted components')
        plt.subplot(122)
        crd_bad = cm.utils.visualization.plot_contours(
            cnm.A[:, idx_components_bad], cn_filter, thr=.8, vmax=0.95)
        plt.title('Contour plots of rejected components')

#%% VISUALIZE IN DETAILS COMPONENTS
        cm.utils.visualization.view_patches_bar(
            Yr, cnm.A[:, idx_components], cnm.C[idx_components], cnm.b, cnm.f,
            dims[0], dims[1], YrA=cnm.YrA[idx_components], img=cn_filter)


#%% MOVIES
    if display_images:
        B = cnm.b.dot(cnm.f)
        if 'sparse' in str(type(B)):
            B = B.toarray()
    # denoised movie
        cm.movie(np.reshape(cnm.A.tocsc()[:, idx_components].dot(cnm.C[idx_components]) + B,
                            dims + (-1,), order='F').transpose(2, 0, 1)).play(magnification=3, gain=1.)
    # only neurons
        cm.movie(np.reshape(cnm.A.tocsc()[:, idx_components].dot(
            cnm.C[idx_components]), dims + (-1,), order='F').transpose(2, 0, 1)
        ).play(magnification=3, gain=10.)
    # only the background
        cm.movie(np.reshape(B, dims + (-1,), order='F').transpose(2, 0, 1)
                 ).play(magnification=3, gain=1.)
    # residuals
        cm.movie(np.array(Y) - np.reshape(cnm.A.tocsc()[:, :].dot(cnm.C[:]) + B,
                                          dims + (-1,), order='F').transpose(2, 0, 1)
                 ).play(magnification=3, gain=10., fr=10)
    # eventually, you can rerun the algorithm on the residuals
        plt.imshow(cm.movie(np.array(Y) - np.reshape(cnm.A.tocsc()[:, :].dot(cnm.C[:]) + B,
                                                     dims + (-1,), order='F').transpose(2, 0, 1)
                            ).local_correlations(swap_dim=False))
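
The closing comment suggests rerunning the algorithm on the residuals. A hedged sketch of that idea, reusing the names defined in the demo above (note that B is only computed inside the display_images branch, so it is rebuilt here):

import numpy as np
import caiman as cm

B = cnm.b.dot(cnm.f)
if 'sparse' in str(type(B)):
    B = B.toarray()
# residual movie: raw data minus denoised components and background
residual = cm.movie(np.array(Y) -
                    np.reshape(cnm.A.tocsc()[:, :].dot(cnm.C[:]) + B,
                               dims + (-1,), order='F').transpose(2, 0, 1))
# summary images on the residual could seed a second CNMF-E pass
cn_res, pnr_res = cm.summary_images.correlation_pnr(
    residual, gSig=gSig, swap_dim=False)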
Code example #25
def test_general():
    """General test of the pipeline with comparison against ground truth.

    A shorter version of the demo pipeline that does the real test work by
    calling the comparison module. It raises if the movie parameters or the
    CNMF parameters differ from the ground truth, if rigid correction, CNMF
    on patches or full-frame CNMF disagree with the stored results, if the
    file cannot be read, or if no ground truth is available.
    """
    #\bug
    #\warning

    global params_movie
    global params_diplay  # sic: presumably 'params_display' was intended
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']

    cwd = os.getcwd()
    fname = download_demo(fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]

    ################ RIG CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname,
                       min_mov,
                       max_shifts=max_shifts,
                       niter_rig=niter_rig,
                       splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True,
                       nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)  # int, not the removed np.int
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
    ###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:  # elif not params_movie.has_key('overlaps'):
        fnames = mc.fname_tot_rig
        border_to_0 = bord_px_rig
        m_els = m_rig

    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname.split('/')[-1][:-4]  # fname is a str after download_demo, so no [0] indexing
    name_new = cm.save_memmap_each(fnames,
                                   base_name=base_name,
                                   resize_fact=(1, 1, downsample_factor),
                                   remove_init=remove_init,
                                   idx_xy=idx_xy,
                                   add_to_movie=add_to_movie,
                                   border_to_0=border_to_0)
    name_new.sort()

    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(name_new,
                                        base_name='Yr',
                                        n_chunks=params_movie['n_chunks'],
                                        dview=None)
    else:
        logging.warning('One file only, not saving!')
        fname_new = name_new[0]

    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T, ), order='F')

    if np.min(images) < 0:
        # TODO: should do this automatically, with a while loop at line 367
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception(
            'Movie contains nan! You did not remove enough borders')

    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0
    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']

    if params_movie['is_dendrites']:
        if params_movie['init_method'] != 'sparse_nmf':  # the original used `is not` on a string literal
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')

################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1,
                    k=K,
                    gSig=gSig,
                    merge_thresh=params_movie['merge_thresh'],
                    p=params_movie['p'],
                    dview=None,
                    rf=rf,
                    stride=stride_cnmf,
                    memory_fact=params_movie['memory_fact'],
                    method_init=init_method,
                    alpha_snmf=alpha_snmf,
                    only_init_patch=params_movie['only_init_patch'],
                    gnb=params_movie['gnb'],
                    method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    comp.cnmpatch.estimates = None
    cnm = cnm.fit(images)
    A_tot = cnm.estimates.A
    C_tot = cnm.estimates.C
    YrA_tot = cnm.estimates.YrA
    b_tot = cnm.estimates.b
    f_tot = cnm.estimates.f
    # DISCARDING
    logging.info(('Number of components:' + str(A_tot.shape[-1])))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    # note: the original reads the *_delta_* key for both thresholds,
    # which looks like a copy-paste slip
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces,
        Y,
        A_tot,
        C_tot,
        b_tot,
        f_tot,
        final_frate=final_frate,
        Npeaks=Npeaks,
        r_values_min=r_values_min,
        fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    #######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
    #################### ########################

    ################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1,
                    k=A_tot.shape[-1],  # the original passed A_tot.shape (a tuple), a likely bug
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    Ain=A_tot,
                    Cin=C_tot,
                    f_in=f_tot,
                    rf=None,
                    stride=None,
                    method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f, cnm.estimates.YrA, cnm.estimates.sn
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability (same copy-paste slip as in the patch step)
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces,
        Y,
        A,
        C,
        b,
        f,
        final_frate=final_frate,
        Npeaks=Npeaks,
        r_values_min=r_values_min,
        fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min,
        return_all=True)
    ##########
    A_tot_full = A_tot.tocsc()[:, idx_components]  # note: indexes the patch result; A (full frame) may be intended
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [
        A_tot_full.copy(), C_tot_full.copy()
    ]
    #################### ########################
    comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    try:
        for log_file in log_files:
            os.remove(log_file)
    except OSError:
        logging.warning('Cannot remove log files')
############ assertions ##################
    pb = False
    if (comp.information['differences']['params_movie']):
        logging.error(
            "you need to set the same movie parameters as the ground truth to have a real comparison (use the comp.see() function to explore it)"
        )
        pb = True
    if (comp.information['differences']['params_cnm']):
        logging.warning(
            "you need to set the same cnmf parameters as the ground truth to have a real comparison (use the comp.see() function to explore it)"
        )
        # pb = True
    if (comp.information['diff']['rig']['isdifferent']):
        logging.error("the rigid shifts are different from the ground truth")
        pb = True
    if (comp.information['diff']['cnmpatch']['isdifferent']):
        logging.error(
            "the cnmf on patch produces results different from the ground truth")
        pb = True
    if (comp.information['diff']['cnmfull']['isdifferent']):
        logging.error(
            "the cnmf full frame produces results different from the ground truth")
        pb = True

    assert (not pb)
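
For orientation, a hypothetical params_movie dictionary covering the keys test_general() reads; every value below is a placeholder, not the stored ground-truth configuration:

params_movie = {
    'fname': ['demoMovie.tif'],  # placeholder movie name
    'niter_rig': 1, 'max_shifts': (6, 6),
    'splits_rig': 14, 'num_splits_to_process_rig': None,
    'n_chunks': 10, 'p': 1, 'merge_thresh': 0.8,
    'rf': 15, 'stride_cnmf': 6, 'K': 4,
    'init_method': 'greedy_roi', 'gSig': [4, 4], 'alpha_snmf': None,
    'is_dendrites': False, 'only_init_patch': True,
    'gnb': 1, 'memory_fact': 1, 'final_frate': 10, 'Npeaks': 10,
    'r_values_min_patch': 0.7, 'fitness_delta_min_patch': -20,
    'r_values_min_full': 0.8, 'fitness_delta_min_full': -40,
}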
Code example #26
    print(len(fls))
    print([fl.split('/')[-1] for fl in fls])
    fnames = fls
    border_to_0 = 0
    m_els = cm.load(fnames[0])
    idx_xy = None
    # TODO: needinfo
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    # if you need to remove frames from the beginning of each file
    remove_init = 0
    # downsample movie in time: use .2 or .1 if file is large and you want a quick answer
    downsample_factor = 1
    base_name = fnames[0].split('/')[-1][:-4]
    # TODO: todocument
    name_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name, resize_fact=(
        1, 1, downsample_factor), remove_init=remove_init, idx_xy=idx_xy, add_to_movie=add_to_movie,
        border_to_0=border_to_0)

    fname_new = cm.save_memmap_join(
        name_new, base_name='Yr', n_chunks=100, dview=dview)

#%%
folders = next(os.walk('.'))[1]  # the original's os.walk('.').next() is Python 2 only
fls_for_each = []
count = 0
for fold in folders:
    count += 1
    print('********************')
    fls = glob.glob(os.path.join(fold, 'images', 'mmap/*.mmap'))
    fls.sort()
    fls = [os.path.abspath(fl) for fl in fls]
Code example #27
File: mmapping.py Project: etterguillaume/CaImAn
def save_memmap(filenames,
                base_name='Yr',
                resize_fact=(1, 1, 1),
                remove_init=0,
                idx_xy=None,
                order='F',
                xy_shifts=None,
                is_3D=False,
                add_to_movie=0,
                border_to_0=0,
                dview=None,
                n_chunks=100,
                slices=None):
    """ Efficiently write data from a list of tif files into a memory mappable file

    Args:
        filenames: list
            list of tif files or list of numpy arrays

        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"

        resize_fact: tuple
            x,y, and z downsampling factors (0.5 means downsampled by a factor 2)

        remove_init: int
            number of frames to remove at the beginning of each tif file
            (used for resonant scanning images if the laser is turned on trial by trial)

        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance
            idx_xy = (slice(150,350,None), slice(150,350,None))

        order: string
            whether to save the file in 'C' or 'F' order

        xy_shifts: list
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping

        is_3D: boolean
            whether it is 3D data

        add_to_movie: floating-point
            value to add to each image point, typically to keep negative values out.

        border_to_0: (undocumented)

        dview:       (undocumented)

        n_chunks:    (undocumented)

        slices: slice object or list of slice objects
            slice can be used to select a portion of the movie in time and in
            the x,y directions. For instance,
            slices = [slice(0,200), slice(0,100), slice(0,100)] will take
            the first 200 frames and the first 100 pixels along x and y.
    Returns:
        fname_new: the name of the mapped file, the format is such that
            the name will contain the frame dimensions and the number of frames

    """
    if type(filenames) is not list:
        raise Exception('input should be a list of filenames')

    if slices is not None:
        slices = [slice(0, None) if sl is None else sl for sl in slices]

    if len(filenames) > 1:
        recompute_each_memmap = False
        for file__ in filenames:
            if ('order_' + order not in file__) or ('.mmap' not in file__):
                recompute_each_memmap = True


        if recompute_each_memmap or (remove_init>0) or (idx_xy is not None)\
                or (xy_shifts is not None) or (add_to_movie != 0) or (border_to_0>0)\
                or slices is not None:

            logging.debug('Distributing memory map over many files')
            # Here we make a bunch of memmap files in the right order. Same parameters
            fname_new = cm.save_memmap_each(filenames,
                                            base_name=base_name,
                                            order=order,
                                            border_to_0=border_to_0,
                                            dview=dview,
                                            resize_fact=resize_fact,
                                            remove_init=remove_init,
                                            idx_xy=idx_xy,
                                            xy_shifts=xy_shifts,
                                            slices=slices,
                                            add_to_movie=add_to_movie)
        else:
            fname_new = filenames

        # The goal is to make a single large memmap file, which we do here
        if order == 'F':
            raise Exception(
                'You cannot merge files in F order, they must be in C order for CaImAn'
            )

        fname_new = cm.save_memmap_join(fname_new,
                                        base_name=base_name,
                                        dview=dview,
                                        n_chunks=n_chunks)

    else:
        # TODO: can be done online
        Ttot = 0
        for idx, f in enumerate(filenames):
            if isinstance(f, str):  # Might not always be filenames.
                logging.debug(f)

            if is_3D:
                # basestring comes from past.builtins in the original module (str on Python 3)
                Yr = f if not isinstance(f, basestring) else tifffile.imread(f)
                if slices is not None:
                    Yr = Yr[slices]
                else:
                    if idx_xy is None:  # todo: remove if unused, superseded by the slices parameter
                        Yr = Yr[remove_init:]
                    elif len(idx_xy) == 2:  # todo: superseded by slices
                        Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                    else:  # todo: superseded by slices
                        Yr = Yr[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

            else:
                # basestring as above; cm.load accepts both file names and lists of them
                Yr = cm.load(f, fr=1, in_memory=True) if (isinstance(f, basestring)
                                                          or isinstance(f, list)) else cm.movie(f)
                if xy_shifts is not None:
                    Yr = Yr.apply_shifts(xy_shifts,
                                         interpolation='cubic',
                                         remove_blanks=False)

                if slices is not None:
                    Yr = Yr[slices]
                else:
                    if idx_xy is None:
                        if remove_init > 0:
                            Yr = Yr[remove_init:]
                    elif len(idx_xy) == 2:
                        Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                    else:
                        raise Exception(
                            'You need to set is_3D=True for 3D data')
                        # unreachable in the original; kept for reference:
                        # Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

            if border_to_0 > 0:
                if slices is not None:
                    if type(slices) is list:
                        raise Exception(
                            'You cannot slice in x and y and then zero the borders: '
                            'if you only want to slice in time, pass a single slice '
                            'object rather than a list')

                min_mov = Yr.calc_min()
                Yr[:, :border_to_0, :] = min_mov
                Yr[:, :, :border_to_0] = min_mov
                Yr[:, :, -border_to_0:] = min_mov
                Yr[:, -border_to_0:, :] = min_mov

            fx, fy, fz = resize_fact
            if fx != 1 or fy != 1 or fz != 1:
                if 'movie' not in str(type(Yr)):
                    Yr = cm.movie(Yr, fr=1)
                Yr = Yr.resize(fx=fx, fy=fy, fz=fz)

            T, dims = Yr.shape[0], Yr.shape[1:]
            Yr = np.transpose(Yr, list(range(1, len(dims) + 1)) + [0])
            Yr = np.reshape(Yr, (np.prod(dims), T), order='F')
            Yr = np.ascontiguousarray(Yr, dtype=np.float32) + np.float32(
                0.0001) + np.float32(add_to_movie)

            if idx == 0:
                fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(
                    dims[1]) + '_d3_' + str(
                        1 if len(dims) == 2 else dims[2]) + '_order_' + str(
                            order)  # TODO: Rewrite more legibly
                if isinstance(f, str):
                    fname_tot = os.path.join(os.path.split(f)[0], fname_tot)
                if len(filenames) > 1:  # always False in this branch; kept from the original
                    big_mov = np.memmap(fname_tot,
                                        mode='w+',
                                        dtype=np.float32,
                                        shape=prepare_shape(
                                            (np.prod(dims), T)),
                                        order=order)
                    big_mov[:, Ttot:Ttot + T] = Yr
                    del big_mov
                else:
                    logging.debug('SAVING WITH numpy.tofile()')
                    Yr.tofile(fname_tot)
            else:
                big_mov = np.memmap(fname_tot,
                                    dtype=np.float32,
                                    mode='r+',
                                    shape=prepare_shape(
                                        (np.prod(dims), Ttot + T)),
                                    order=order)

                big_mov[:, Ttot:Ttot + T] = Yr
                del big_mov

            sys.stdout.flush()
            Ttot = Ttot + T

        fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
        try:
            # need to explicitly remove destination on windows
            os.unlink(fname_new)
        except OSError:
            pass
        os.rename(fname_tot, fname_new)

    return fname_new
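
A brief sketch of the slices parameter documented above, again assuming a single hypothetical tif file:

import caiman as cm

# keep the first 500 frames and a 128x128 crop; the file name is a placeholder
fname_new = save_memmap(['movie1.tif'], base_name='Yr', order='C',
                        slices=[slice(0, 500), slice(0, 128), slice(0, 128)])
Yr, dims, T = cm.load_memmap(fname_new)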
Code example #28
for file in glob.glob(os.path.join(base_folder, '*.tif')):
    if file.endswith("ie.tif"):
        fnames.append(os.path.abspath(file))
fnames.sort()
if len(fnames) == 0:
    raise Exception("Could not find any tiff file")
print(fnames)
fnames = fnames
#%%
# idx_x=slice(12,500,None)
# idx_y=slice(12,500,None)
# idx_xy=(idx_x,idx_y)
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name, resize_fact=(
    1, 1, downsample_factor), remove_init=0, idx_xy=idx_xy)
name_new.sort()
print(name_new)
#%%
# note: this second call recomputes the per-file memmaps and overwrites name_new
name_new = cm.save_memmap_each(fnames, dview=dview, base_name='Yr', resize_fact=(
    1, 1, 1), remove_init=0, idx_xy=None)
name_new.sort()
#%%
fname_new = cm.save_memmap_join(
    name_new, base_name='Yr', n_chunks=12, dview=dview)
#%%
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
Y = np.reshape(Yr, dims + (T,), order='F')
#%% visualize correlation image
Cn = cm.local_correlations(Y)
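
To inspect the correlation image just computed, a minimal matplotlib sketch:

import matplotlib.pyplot as plt

plt.imshow(Cn, cmap='gray', vmin=0, vmax=1)
plt.title('local correlations')
plt.colorbar()
plt.show()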