Example #1
def seeded_Caiman_wAgonia(data_path, opts, agonia_th):
    '''Run Caiman using as seeds the boxes detected with Agonia and save results
    Parameters
    ----------
    data_path : string
        path to folder containing the data
    opts : caiman object with the options for motion correction
    agonia_th : float
        threshold for detection confidence for each box'''
    data_name, median_projection, fnames, fname_new, results_caiman_path, boxes_path = get_files_names(
        data_path)

    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    #load frames in python format (T x X x Y)

    #%% restart cluster to clean up memory
    if 'dview' in locals():
        cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=12,
                                                     single_thread=False)

    # Load the Agonia boxes from the pickle file (the context manager closes the file)
    with open(boxes_path, 'rb') as f:
        cajas = pickle.load(f)
    cajas = cajas[cajas[:, 4] > agonia_th]

    #use the average size of the Agonia boxes as the cell size parameter for Caiman
    half_width = np.mean(cajas[:, 2] - cajas[:, 0]) / 2
    half_height = np.mean(cajas[:, 3] - cajas[:, 1]) / 2
    gSig = (half_width.astype(int), half_height.astype(int))
    opts_dict = {'gSig': gSig}
    opts.change_params(opts_dict)

    #Build masks with the Agonia boxes
    Ain = np.zeros((np.prod(images.shape[1:]), cajas.shape[0]), dtype=bool)
    for i, box in enumerate(cajas):
        frame = np.zeros(images.shape[1:])
        #note that the coordinates of the boxes are transposed with respect to Caiman objects
        frame[box[1].astype('int'):box[3].astype('int'),
              box[0].astype('int'):box[2].astype('int')] = 1
        Ain[:, i] = frame.flatten('F')

    #Run analysis
    cnm_seeded = cnmf.CNMF(n_processes, params=opts, dview=dview, Ain=Ain)
    try:
        print('initiating fit')
        cnm_seeded.fit(images)
        print('saving results')
        #cnm_seeded.estimates.detrend_df_f(quantileMin=50,frames_window=750,detrend_only=False)
        cnm_seeded.save(
            os.path.join(
                data_path,
                os.path.splitext(data_name[0])[0] + '_analysis_results.hdf5'))
    except Exception as e:
        print('We have a problem...', e)

    cm.stop_server(dview=dview)
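
A minimal call sketch for the function above. The data path, frame rate, decay time and confidence threshold are placeholder assumptions, not values from the original project; the CNMFParams object is only expected to carry the motion-correction/CNMF options that the rest of the pipeline needs.

# Hypothetical usage sketch; every value below is an illustrative placeholder.
from caiman.source_extraction.cnmf import params

opts = params.CNMFParams(params_dict={'fr': 30, 'decay_time': 0.4})
seeded_Caiman_wAgonia('/path/to/experiment_folder', opts, agonia_th=0.2)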
Example #2
def run_cnmf(n_processes, opts, dview, images):
    """The FOV is split is different overlapping patches that are subsequently
    processed in parallel by the CNMF algorithm. The results from all the
    patches are merged with special attention to idendtified components on the
    border. The results are then refined by additional CNMF iterations."""
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0). If you want to have
    # deconvolution within each patch change params.patch['p_patch'] to a
    # nonzero value
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)
    return cnm
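
A hedged sketch of how run_cnmf might be driven after motion correction and memory mapping; the memmap file name, cluster setup and parameter values are assumptions for illustration only.

# Illustrative driver for run_cnmf; the memmap name and parameter values are placeholders.
import numpy as np
import caiman as cm
from caiman.source_extraction.cnmf import params

c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
Yr, dims, T = cm.load_memmap('/path/to/memmap__order_C.mmap')  # placeholder memmap
images = np.reshape(Yr.T, [T] + list(dims), order='F')
opts = params.CNMFParams(params_dict={'p': 0, 'nb': 2, 'rf': 15,
                                      'K': 4, 'gSig': [4, 4]})
cnm = run_cnmf(n_processes, opts, dview, images)
cm.stop_server(dview=dview)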
Example #3
def run_pipeline(n_processes, opts, dview, do_mc=True, time_it=False):
    """Run the combined steps of motion correction, memory mapping, and cnmf
    fitting in one step."""
    # Start timer
    start = time.time()

    # Run pipeline
    cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm1.fit_file(motion_correct=do_mc)

    # Print time if requested
    if time_it:
        print('Pipeline processing took {:.3f} seconds'.format(time.time() -
                                                               start))
    return cnm1
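
Because run_pipeline goes through fit_file, the raw movie names must already be present in the parameters object; a minimal sketch, with a placeholder file name and illustrative parameter values.

# Minimal sketch for run_pipeline; the tif path and values are placeholders.
import caiman as cm
from caiman.source_extraction.cnmf import params

opts = params.CNMFParams(params_dict={'fnames': ['/path/to/movie.tif'],
                                      'fr': 30, 'decay_time': 0.4})
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
cnm1 = run_pipeline(n_processes, opts, dview, do_mc=True, time_it=True)
cm.stop_server(dview=dview)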
    def do_fit(self):
        """
        Perform the CNMF calculation.
        """
        t = tic()
        print('Starting motion correction and CNMF...')
        cnm_seeded = cnmf.CNMF(self.n_processes,
                               params=self.opts,
                               dview=self.dview,
                               Ain=self.Ain)
        cnm_seeded.fit(self.movie)
        self.coords = extract_cell_locs(cnm_seeded)
        cnm_seeded.estimates.detrend_df_f()
        self.dff = cnm_seeded.estimates.F_dff
        cnm_seeded.save(self.save_folder +
                        f'caiman_data_{self.fnumber:04}.hdf5')
        print(f'CNMF fitting done. Took {toc(t):.4f}s')
        return cnm_seeded.estimates.C
    def cnmf_neurofinder(params_dict):
        import numpy as np
        from caiman.source_extraction.cnmf import cnmf
        from caiman.source_extraction.cnmf import params

        fname_new = params_dict['fnames'][0]
        print(fname_new)
        Yr, dims, T = cm.load_memmap(fname_new)
        images = np.reshape(Yr.T, [T] + list(dims), order='F')
        opts = params.CNMFParams(params_dict=params_dict)
        dview = params_dict['dview']
        print('Starting CNMF')
        opts.set('temporal', {'p': 0})
        cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)  # n_processes is expected from the enclosing scope
        cnm = cnm.fit(images)
        cnm.params.change_params({'update_background_components': True,
                                  'skip_refinement': False,
                                  'n_pixels_per_process': 4000, 'dview': dview})
        opts.set('temporal', {'p': params_dict['p']})
        cnm2 = cnm.refit(images, dview=dview)
        cnm2.save(fname_new[:-5] + '_cnmf.hdf5')

        return cnm2
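
cnmf_neurofinder expects an existing C-order memmap, the cluster handle inside params_dict, and (implicitly) n_processes from the calling scope; a hedged sketch of such a call, assuming the function is reachable at module level and using purely illustrative values.

# Hedged sketch of the inputs cnmf_neurofinder expects; the memmap name and values are placeholders.
import caiman as cm

c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
params_dict = {'fnames': ['/path/to/memmap__order_C.mmap'],  # placeholder memmap
               'p': 1, 'nb': 2, 'rf': 40, 'stride': 20,
               'K': 20, 'gSig': [5, 5],
               'dview': dview}
cnm2 = cnmf_neurofinder(params_dict)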
    def do_final_fit(self):
        """
        Do the last fit on all the tiffs in the folder. This runs CNMF on the entire concatenated recording.
        """
        t = tic()
        print(f'processing files: {self.tiffs}')
        self.opts.change_params(dict(fnames=self.tiffs))

        maplist = []
        for i in range(self.fnumber):
            m = glob(self.folder + f'MAP{i}a_*')[0]
            maplist.append(m)

        # all_memmaps = glob(self.folder + 'MAP00*.mmap')
        memmap = cm.save_memmap_join(maplist,
                                     base_name='FINAL',
                                     dview=self.dview)

        Yr, dims, T = cm.load_memmap(memmap)
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        cnm_seeded = cnmf.CNMF(self.n_processes,
                               params=self.opts,
                               dview=self.dview,
                               Ain=self.Ain)
        cnm_seeded.fit(images)
        cnm_seeded.save(self.save_folder + 'FINAL_caiman_data.hdf5')

        self.coords = cm.utils.visualization.get_contours(
            cnm_seeded.estimates.A, dims=cnm_seeded.dims)
        cnm_seeded.estimates.detrend_df_f()
        self.dff = cnm_seeded.estimates.F_dff
        self.C = cnm_seeded.estimates.C

        #self.save_json()
        print(f'CNMF fitting done. Took {toc(t):.4f}s')
        print('Caiman online analysis done.')
Example #7
def compute_cnmf(n_processes, dview, images, dims):
    '''Apply the CNMF algorithm to the video
        
        Args:
            n_processes:
                Number of parallel processes
                
            dview:
                Direct View object for parallelization purposes when using ipyparallel
                
            images:
                The input video
                shape: (frames, dims, dims)
            
            dims:
                Dimension of each frame
                shape: (521, 521)
                
        Returns:
            cnm:
                Object obtained from CNMF algorithm with C,A,S,b,f
        
        '''
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    gnb=gnb,
                    rf=rf,
                    stride=stride,
                    rolling_sum=False)
    cnm = cnm.fit(images)

    return cnm
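
Note that compute_cnmf reads K, gSig, merge_thresh, p, gnb, rf and stride from the enclosing module rather than from its arguments; a hedged sketch of those globals with purely illustrative values (n_processes, dview, images and dims come from the usual cluster/memmap setup shown in the other examples).

# compute_cnmf relies on these module-level names; the values below are illustrative assumptions.
K = 100             # number of neurons expected per patch
gSig = [5, 5]       # expected half size of neurons in pixels
merge_thresh = 0.8  # merging threshold, max correlation allowed
p = 1               # order of the autoregressive system
gnb = 2             # number of global background components
rf = 25             # half-size of the patches in pixels
stride = 6          # overlap between patches in pixels

cnm = compute_cnmf(n_processes, dview, images, dims)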
Example #8
# expected half size of neurons
gSig = params_movie['gSig']
# this controls sparsity
alpha_snmf = params_movie['alpha_snmf']
# frame rate of movie (even considering eventual downsampling)
final_frate = params_movie['final_frate']


if params_movie['is_dendrites']:
    if params_movie['init_method'] != 'sparse_nmf':
        raise Exception('dendritic requires sparse_nmf')
    if params_movie['alpha_snmf'] is None:
        raise Exception('need to set a value for alpha_snmf')
#%% Extract spatial and temporal components on patches
t1 = time.time()
cnm = cnmf.CNMF(n_processes, k=K, gSig=gSig, merge_thresh=0.8, p=0, dview=dview, Ain=None, rf=rf, stride=stride_cnmf, memory_fact=1,
                method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=True, gnb=1, method_deconvolution='oasis', remove_very_bad_comps=True)
cnm = cnm.fit(images)

A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
f_tot = cnm.f
sn_tot = cnm.sn
t2 = time.time() - t1
print(('Number of components:' + str(A_tot.shape[-1])))
#%%
np.savez(os.path.join(os.path.split(fname_new)[0], os.path.split(fname_new)[
         1][:-4] + 'results_analysis_ZEBRA_patch.npz'), Cn=Cn, A=A_tot, C=C_tot, b=b_tot, f=f_tot, YrA=YrA_tot, sn=sn_tot, d1=d1, d2=d2)
#%%
pl.figure()
Example #9
def main():
    pass  # For compatibility between running under Spyder and the CLI

#%% Select file(s) to be processed (download if not present)
    fnames = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
        fnames = [download_demo(fnames[0])]

#%% First setup some parameters for data and motion correction

    # dataset dependent parameters
    fr = 30             # imaging rate in frames per second
    decay_time = 0.4    # length of a typical transient in seconds
    dxy = (2., 2.)      # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)       # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um

    # motion correction parameters
    pw_rigid = True       # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
    # start a new patch for pw-rigid motion correction every x pixels
    strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
    # overlap between patches (size of patch in pixels: strides+overlaps)
    overlaps = (24, 24)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'copy'
    }

    opts = params.CNMFParams(params_dict=mc_dict)

# %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the video press q
    display_images = True

    if display_images:
        m_orig = cm.load_movie_chain(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

# %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # note that the file is not loaded in memory

# %% Run (piecewise-rigid motion) correction using NoRMCorre
    mc.motion_correct(save_movie=True)

# %% compare with original movie
    if display_images:
        m_orig = cm.load_movie_chain(fnames)
        m_els = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie,
                                      m_els.resize(1, 1, ds_ratio)], axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit

# %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file, base_name='memmap_', order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

# %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%  parameters for source extraction and deconvolution
    p = 1                    # order of the autoregressive system
    gnb = 2                  # number of global background components
    merge_thr = 0.85         # merging threshold, max correlation allowed
    rf = 15
    # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf = 6          # amount of overlap between the patches in pixels
    K = 4                    # number of components per patch
    gSig = [4, 4]            # expected half size of neurons in pixels
    # initialization method (if analyzing dendritic data using 'sparse_nmf')
    method_init = 'greedy_roi'
    ssub = 2                     # spatial subsampling during initialization
    tsub = 2                     # temporal subsampling during initialization

    # parameters for component evaluation
    opts_dict = {'fnames': fnames,
                 'p': p,
                 'fr': fr,
                 'nb': gnb,
                 'rf': rf,
                 'K': K,
                 'gSig': gSig,
                 'stride': stride_cnmf,
                 'method_init': method_init,
                 'rolling_sum': True,
                 'merge_thr': merge_thr,
                 'n_processes': n_processes,
                 'only_init': True,
                 'ssub': ssub,
                 'tsub': tsub}

    opts.change_params(params_dict=opts_dict);
# %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0). If you want to have
    # deconvolution within each patch change params.patch['p_patch'] to a
    # nonzero value

    #opts.change_params({'p': 0})
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

# %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)

# %% plot contours of found components
    Cns = local_correlations_movie_offline(mc.mmap_file[0],
                                           remove_baseline=True, window=1000, stride=1000,
                                           winSize_baseline=100, quantil_min_baseline=10,
                                           dview=dview)
    Cn = Cns.max(axis=0)
    Cn[np.isnan(Cn)] = 0
    cnm.estimates.plot_contours(img=Cn)
    plt.title('Contour plots of found components')
#%% save results
    cnm.estimates.Cn = Cn
    cnm.save(fname_new[:-5]+'_init.hdf5')

# %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm2 = cnm.refit(images, dview=dview)
    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2  # signal to noise ratio for accepting a component
    rval_thr = 0.85  # space correlation threshold for accepting a component
    cnn_thr = 0.99  # threshold for CNN based classifier
    cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected

    cnm2.params.set('quality', {'decay_time': decay_time,
                               'min_SNR': min_SNR,
                               'rval_thr': rval_thr,
                               'use_cnn': True,
                               'min_cnn_thr': cnn_thr,
                               'cnn_lowest': cnn_lowest})
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)
    # %% PLOT COMPONENTS
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)

    # %% VIEW TRACES (accepted and rejected)

    if display_images:
        cnm2.estimates.view_components(images, img=Cn,
                                      idx=cnm2.estimates.idx_components)
        cnm2.estimates.view_components(images, img=Cn,
                                      idx=cnm2.estimates.idx_components_bad)
    #%% update object with selected components
    cnm2.estimates.select_components(use_object=True)
    #%% Extract DF/F values
    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)

    #%% Show final traces
    cnm2.estimates.view_components(img=Cn)
    #%%
    cnm2.estimates.Cn = Cn
    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5')
    #%% reconstruct denoised movie (press q to exit)
    if display_images:
        cnm2.estimates.play_movie(images, q_max=99.9, gain_res=2,
                                  magnification=2,
                                  bpx=border_to_0,
                                  include_bck=False)  # background not shown

    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
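
The hdf5 files written by cnm.save / cnm2.save above can be reloaded in a later session; a minimal sketch, assuming CaImAn's load_CNMF helper and a placeholder file name.

# Minimal reload sketch; the file name is a placeholder for the hdf5 saved above.
from caiman.source_extraction.cnmf.cnmf import load_CNMF

cnm_loaded = load_CNMF('/path/to/memmap__results.hdf5')
print(cnm_loaded.estimates.C.shape)  # temporal traces of the saved components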
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%%
    """
    General parameters
    """
    play_movie = 1
    plot_extras = 1
    plot_extras_cell = 1
    compute_mc_metrics = 1

    #%% Select file(s) to be processed (download if not present)
    """
    Load file
    """

    #fnames = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    #if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
    #    fnames = [download_demo(fnames[0])]

    #fnames = ['/home/yuriy/Desktop/Data/rest1_5_9_19_cut.tif']

    #f_dir = 'C:\\Users\\rylab_dataPC\\Desktop\\Yuriy\\caiman_data\\short\\'
    f_dir = 'G:\\analysis\\190828-calcium_voltage\\soma_dendrites\\pCAG_jREGECO1a_ASAP3_anesth_001\\'
    f_name = 'Ch1'
    f_ext = 'tif'
    fnames = [f_dir + f_name + '.' + f_ext]

    #fnames = ['C:/Users/rylab_dataPC/Desktop/Yuriy/caiman_data/rest1_5_9_19_2_cut_ca.hdf5']

    #%% First setup some parameters for data and motion correction
    """
    Parameters
    """

    # dataset dependent parameters
    fr = 30  # imaging rate in frames per second
    decay_time = 1  # length of a typical transient in seconds (was 0.4)
    dxy = (2., 2.)  # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)  # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um

    # motion correction parameters
    pw_rigid = True  # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    #max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
    max_shifts = [6, 6]
    # start a new patch for pw-rigid motion correction every x pixels
    #strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
    strides = [48, 48]
    # overlap between patches (size of patch in pixels: strides+overlaps)
    overlaps = (24, 24)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'copy'
    }

    opts = params.CNMFParams(params_dict=mc_dict)

    # %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the video press q

    if play_movie:
        m_orig = cm.load_movie_chain(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

    # %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # note that the file is not loaded in memory

    # %% Run (piecewise-rigid motion) correction using NoRMCorre
    mc.motion_correct(save_movie=True)

    # type "mc."and press TAB to see all interesting associated variables and self. outputs
    # interesting outputs
    # saved file is mc.fname_tot_els / mc.fname_tot_rig
    # mc.x_shifts_els / mc.y_shifts_els: shifts in x/y per frame per patch
    # mc.coord_shifts_els: coordinates associated to patches with shifts
    # mc.total_template_els: updated template for pw
    # mc.total_template_rig: updated template for rigid
    # mc.templates_rig: templates for each iteration in rig

    #%% # compute metrics for the results (TAKES TIME!!)
    if compute_mc_metrics:
        # not finished
        bord_px_els = np.ceil(
            np.maximum(np.max(np.abs(mc.x_shifts_els)),
                       np.max(np.abs(mc.y_shifts_els)))).astype(int)

        final_size = np.subtract(
            mc.total_template_els.shape,
            2 * bord_px_els)  # remove pixels in the boundaries
        winsize = 100
        swap_dim = False
        resize_fact_flow = .2  # downsample for computing ROF

        tmpl_rig, correlations_orig, flows_orig, norms_orig, crispness_orig = cm.motion_correction.compute_metrics_motion_correction(
            fnames[0],
            final_size[0],
            final_size[1],
            swap_dim,
            winsize=winsize,
            play_flow=False,
            resize_fact_flow=resize_fact_flow)

        plt.figure()
        plt.plot(correlations_orig)

    # %% compare with original movie
    if play_movie:
        m_orig = cm.load_movie_chain(fnames)
        m_els = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([
            m_orig.resize(1, 1, ds_ratio) - mc.min_mov * mc.nonneg_movie,
            m_els.resize(1, 1, ds_ratio)
        ],
                                     axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit
        del m_orig
        del m_els

    if plot_extras:
        # plot total template
        plt.figure()
        plt.imshow(mc.total_template_els)
        plt.title('Template after iteration')
        # plot x and y corrections
        plt.figure()
        plt.plot(mc.shifts_rig)
        plt.title('Rigid motion correction xy movement')
        plt.legend(['x shift', 'y shift'])
        plt.xlabel('frames')

    # %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file,
                               base_name='memmap_',
                               order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

    # %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%  parameters for source extraction and deconvolution

    p = 2  # order of the autoregressive system
    gnb = 2  # number of global background components
    merge_thr = 0.85  # merging threshold, max correlation allowed
    rf = 15  # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf = 6  # amount of overlap between the patches in pixels
    K = 2  # number of components per patch
    gSig = [15, 15]  # expected half size of neurons in pixels
    # initialization method (if analyzing dendritic data using 'sparse_nmf')
    method_init = 'greedy_roi'
    ssub = 1  # spatial subsampling during initialization
    tsub = 1  # temporal subsampling during initialization

    # parameters for component evaluation
    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'nb': gnb,
        'rf': rf,
        'K': K,
        'gSig': gSig,
        'stride': stride_cnmf,
        'method_init': method_init,
        'rolling_sum': True,
        'merge_thr': merge_thr,
        'n_processes': n_processes,
        'only_init': True,
        'ssub': ssub,
        'tsub': tsub
    }

    opts.change_params(params_dict=opts_dict)
    # %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0)

    opts.change_params({'p': 0})
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

    if plot_extras_cell:
        num_cell_plot = 51
        plt.figure()
        plt.plot(cnm.estimates.C[num_cell_plot, :])
        plt.title('Temporal component')
        plt.legend(['Cell ' + str(num_cell_plot)])
        # plot component spatial profile A
        # first convert back to dense components
        plot_spat_A = cnm.estimates.A[:, num_cell_plot].toarray().reshape(
            list(dims))
        plt.figure()
        plt.imshow(plot_spat_A)
        plt.title('Spatial component cell ' + str(num_cell_plot))

    # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)

    # %% plot contours of found components
    Cn = cm.local_correlations(images, swap_dim=False)
    Cn[np.isnan(Cn)] = 0
    cnm.estimates.plot_contours(img=Cn)
    plt.title('Contour plots of found components')

    if plot_extras:
        plt.figure()
        plt.imshow(Cn)
        plt.title('Local correlations')

    # %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm.params.change_params({'p': p})
    cnm2 = cnm.refit(images, dview=dview)
    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2  # signal to noise ratio for accepting a component
    rval_thr = 0.90  # space correlation threshold for accepting a component
    cnn_thr = 0.99  # threshold for CNN based classifier
    cnn_lowest = 0.1  # neurons with cnn probability lower than this value are rejected

    cnm2.params.set(
        'quality', {
            'decay_time': decay_time,
            'min_SNR': min_SNR,
            'rval_thr': rval_thr,
            'use_cnn': True,
            'min_cnn_thr': cnn_thr,
            'cnn_lowest': cnn_lowest
        })
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

    # %% PLOT COMPONENTS
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
    plt.suptitle('Component selection: min_SNR=' + str(min_SNR) +
                 '; rval_thr=' + str(rval_thr) + '; cnn prob range=[' +
                 str(cnn_lowest) + ' ' + str(cnn_thr) + ']')

    # %% VIEW TRACES (accepted and rejected)

    if plot_extras:
        cnm2.estimates.view_components(images,
                                       img=Cn,
                                       idx=cnm2.estimates.idx_components)
        plt.suptitle('Accepted')
        cnm2.estimates.view_components(images,
                                       img=Cn,
                                       idx=cnm2.estimates.idx_components_bad)
        plt.suptitle('Rejected')

    #plt.figure();
    #plt.plot(cnm2.estimates.YrA[0,:]+cnm2.estimates.C[0,:])
    #
    #
    #
    #
    #plt.figure();
    #plt.plot(cnm2.estimates.R[0,:]-cnm2.estimates.YrA[0,:]);
    #plt.plot();
    #plt.show();
    #
    #
    #plt.figure();
    #plt.plot(cnm2.estimates.detrend_df_f[1,:])

    # these store the good and bad components, and next step sorts them
    # cnm2.estimates.idx_components
    # cnm2.estimates.idx_components_bad

    #%% update object with selected components
    #cnm2.estimates.select_components(use_object=True)
    #%% Extract DF/F values
    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)

    #%% Show final traces

    cnm2.estimates.view_components(img=Cn)
    plt.suptitle("Final results")

    #%% Save the mc data as in cmn struct as well

    ##
    #mc_out = dict(
    #            pw_rigid            = mc.pw_rigid,
    #            fname               = mc.fname,
    #            mmap_file           = mc.mmap_file,
    #            total_template_els  = mc.total_template_els,
    #            total_template_rig  = mc.total_template_rig,
    #            border_nan          = mc.border_nan,
    #            border_to_0         = mc.border_to_0,
    #            x_shifts_els        = mc.x_shifts_els,
    #            y_shifts_els        = mc.y_shifts_els,
    #            Cn                  = Cn
    #            )
    #
    #
    #deepdish.io.save(fnames[0] + '_mc_data.hdf5', mc_out)

    #%% reconstruct denoised movie (press q to exit)
    if play_movie:
        cnm2.estimates.play_movie(images,
                                  q_max=99.9,
                                  gain_res=2,
                                  magnification=2,
                                  bpx=border_to_0,
                                  include_bck=False)  # background not shown

    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    save_results = True
    if save_results:
        cnm2.save(fnames[0][:-4] + '_results.hdf5')
Example #11
        raise Exception('need to set a value for alpha_snmf')
#%%
t1 = time.time()
n_pixels_per_process = 4000  # the smaller the best memory performance
block_size = 50000

cnm = cnmf.CNMF(n_processes,
                k=K,
                gSig=gSig,
                merge_thresh=0.8,
                p=0,
                dview=dview,
                Ain=None,
                rf=rf,
                stride=stride,
                memory_fact=1,
                method_init=init_method,
                alpha_snmf=alpha_snmf,
                only_init_patch=True,
                gnb=1,
                method_deconvolution='oasis',
                n_pixels_per_process=n_pixels_per_process,
                p_ssub=2,
                p_tsub=2,
                block_size=block_size,
                check_nan=False)
cnm = cnm.fit(images)

A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
K = 25  # number of neurons expected (in the whole FOV)

gSig = [6, 6]  # expected half size of neurons
merge_thresh = 0.80  # merging threshold, max correlation allowed
p = 1  # order of the autoregressive system
gnb = 1  # global background order
border_pix = 5

#%% Now RUN CNMF
cnm = cnmf.CNMF(n_processes,
                method_init='sparse_nmf',
                k=K,
                gSig=gSig,
                merge_thresh=merge_thresh,
                p=p,
                dview=dview,
                gnb=gnb,
                rf=rf,
                stride=stride,
                rolling_sum=False,
                alpha_snmf=0.1,
                border_pix=border_pix,
                only_init_patch=False)
cnm = cnm.fit(images)

#%% plot contour plots of components
plt.figure()
crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
plt.title('Contour plots of components')
#%%
cm.movie(
    np.reshape(Yr - cnm.A.dot(cnm.C) - cnm.b.dot(cnm.f),
Example #13
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% First setup some parameters

    # dataset dependent parameters
    display_images = False  # Set this to true to show movies and plots
    fname = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    fr = 30  # imaging rate in frames per second
    decay_time = 0.4  # length of a typical transient in seconds

    # motion correction parameters
    niter_rig = 1  # number of iterations for rigid motion correction
    max_shifts = (6, 6)  # maximum allowed rigid shift
    # for parallelization split the movies into num_splits chunks across time
    splits_rig = 56
    # start a new patch for pw-rigid motion correction every x pixels
    strides = (48, 48)
    # overlap between patches (size of patch strides+overlaps)
    overlaps = (24, 24)
    # for parallelization split the movies into num_splits chunks across time
    splits_els = 56
    upsample_factor_grid = 4  # upsample factor to avoid smearing when merging patches
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    # parameters for source extraction and deconvolution
    p = 1  # order of the autoregressive system
    gnb = 2  # number of global background components
    merge_thresh = 0.8  # merging threshold, max correlation allowed
    # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    rf = 15
    stride_cnmf = 6  # amount of overlap between the patches in pixels
    K = 4  # number of components per patch
    gSig = [4, 4]  # expected half size of neurons
    # initialization method (if analyzing dendritic data using 'sparse_nmf')
    init_method = 'greedy_roi'
    is_dendrites = False  # flag for analyzing dendritic data
    # sparsity penalty for dendritic data analysis through sparse NMF
    alpha_snmf = None

    # parameters for component evaluation
    min_SNR = 2.5  # signal to noise ratio for accepting a component
    rval_thr = 0.8  # space correlation threshold for accepting a component
    cnn_thr = 0.8  # threshold for CNN based classifier

    #%% download the dataset if it's not present in your folder
    if fname[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
        fname = [download_demo(fname[0])]

#%% play the movie
# playing the movie using opencv. It requires loading the movie in memory. To
# close the video press q

    m_orig = cm.load_movie_chain(fname[:1])
    downsample_ratio = 0.2
    offset_mov = -np.min(m_orig[:100])
    moviehandle = m_orig.resize(1, 1, downsample_ratio)
    if display_images:
        moviehandle.play(gain=10, offset=offset_mov, fr=30, magnification=2)

#%% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    #%%% MOTION CORRECTION
    # first we create a motion correction object with the parameters specified
    min_mov = cm.load(fname[0], subindices=range(200)).min()
    # this will be subtracted from the movie to make it non-negative

    mc = MotionCorrect(fname[0],
                       min_mov,
                       dview=dview,
                       max_shifts=max_shifts,
                       niter_rig=niter_rig,
                       splits_rig=splits_rig,
                       strides=strides,
                       overlaps=overlaps,
                       splits_els=splits_els,
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=True,
                       nonneg_movie=True)
    # note that the file is not loaded in memory

    #%% Run piecewise-rigid motion correction using NoRMCorre
    mc.motion_correct_pwrigid(save_movie=True)
    m_els = cm.load(mc.fname_tot_els)
    bord_px_els = np.ceil(
        np.maximum(np.max(np.abs(mc.x_shifts_els)),
                   np.max(np.abs(mc.y_shifts_els)))).astype(int)
    # maximum shift to be used for trimming against NaNs
    #%% compare with original movie
    moviehandle = cm.concatenate([
        m_orig.resize(1, 1, downsample_ratio) + offset_mov,
        m_els.resize(1, 1, downsample_ratio)
    ],
                                 axis=2)
    display_images = False
    if display_images:
        moviehandle.play(fr=60, q_max=99.5, magnification=2,
                         offset=0)  # press q to exit

#%% MEMORY MAPPING
# memory map the file in order 'C'
    fnames = mc.fname_tot_els  # name of the pw-rigidly corrected file.
    border_to_0 = bord_px_els  # number of pixels to exclude
    fname_new = cm.save_memmap(fnames,
                               base_name='memmap_',
                               order='C',
                               border_to_0=bord_px_els)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

    #%% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    #%% RUN CNMF ON PATCHES

    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0)
    t1 = time.time()

    cnm = cnmf.CNMF(n_processes=1,
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=0,
                    dview=dview,
                    rf=rf,
                    stride=stride_cnmf,
                    memory_fact=1,
                    method_init=init_method,
                    alpha_snmf=alpha_snmf,
                    only_init_patch=False,
                    gnb=gnb,
                    border_pix=bord_px_els)
    cnm = cnm.fit(images)

    #%% plot contours of found components
    Cn = cm.local_correlations(images.transpose(1, 2, 0))
    Cn[np.isnan(Cn)] = 0
    plt.figure()
    crd = plot_contours(cnm.A, Cn, thr=0.9)
    plt.title('Contour plots of found components')

    #%% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier

    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                         cnm.YrA, fr, decay_time, gSig, dims,
                                         dview=dview, min_SNR=min_SNR,
                                         r_values_min=rval_thr, use_cnn=False,
                                         thresh_cnn_min=cnn_thr)

    #%% PLOT COMPONENTS

    if display_images:
        plt.figure()
        plt.subplot(121)
        crd_good = cm.utils.visualization.plot_contours(cnm.A[:,
                                                              idx_components],
                                                        Cn,
                                                        thr=.8,
                                                        vmax=0.75)
        plt.title('Contour plots of accepted components')
        plt.subplot(122)
        crd_bad = cm.utils.visualization.plot_contours(
            cnm.A[:, idx_components_bad], Cn, thr=.8, vmax=0.75)
        plt.title('Contour plots of rejected components')

#%% VIEW TRACES (accepted and rejected)

    if display_images:
        view_patches_bar(Yr,
                         cnm.A.tocsc()[:, idx_components],
                         cnm.C[idx_components],
                         cnm.b,
                         cnm.f,
                         dims[0],
                         dims[1],
                         YrA=cnm.YrA[idx_components],
                         img=Cn)

        view_patches_bar(Yr,
                         cnm.A.tocsc()[:, idx_components_bad],
                         cnm.C[idx_components_bad],
                         cnm.b,
                         cnm.f,
                         dims[0],
                         dims[1],
                         YrA=cnm.YrA[idx_components_bad],
                         img=Cn)

#%% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    A_in, C_in, b_in, f_in = cnm.A[:, idx_components], cnm.C[
        idx_components], cnm.b, cnm.f
    cnm2 = cnmf.CNMF(n_processes=1,
                     k=A_in.shape[-1],
                     gSig=gSig,
                     p=p,
                     dview=dview,
                     merge_thresh=merge_thresh,
                     Ain=A_in,
                     Cin=C_in,
                     b_in=b_in,
                     f_in=f_in,
                     rf=None,
                     stride=None,
                     gnb=gnb,
                     method_deconvolution='oasis',
                     check_nan=True)

    cnm2 = cnm2.fit(images)

    #%% Extract DF/F values

    F_dff = detrend_df_f(cnm2.A,
                         cnm2.b,
                         cnm2.C,
                         cnm2.f,
                         YrA=cnm2.YrA,
                         quantileMin=8,
                         frames_window=250)

    #%% Show final traces
    cnm2.view_patches(Yr, dims=dims, img=Cn)

    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

#%% reconstruct denoised movie
    denoised = cm.movie(cnm2.A.dot(cnm2.C) + cnm2.b.dot(cnm2.f)).reshape(
        dims + (-1, ), order='F').transpose([2, 0, 1])

    #%% play along side original data
    moviehandle = cm.concatenate([
        m_els.resize(1, 1, downsample_ratio),
        denoised.resize(1, 1, downsample_ratio)
    ],
                                 axis=2)
    if display_images:
        moviehandle.play(fr=60, gain=15, magnification=2,
                         offset=0)  # press q to exit
Example #14
alpha_snmf = params_movie['alpha_snmf']
# frame rate of movie (even considering eventual downsampling)
final_frate = params_movie['final_frate']

if params_movie['is_dendrites']:
    if params_movie['init_method'] != 'sparse_nmf':
        raise Exception('dendritic requires sparse_nmf')
    if params_movie['alpha_snmf'] is None:
        raise Exception('need to set a value for alpha_snmf')
# %% Extract spatial and temporal components on patches
t1 = time.time()
# TODO: todocument
# TODO: warnings 3
cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=params_movie['p'],
                dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=params_movie[
                    'only_init_patch'],
                gnb=params_movie['gnb'], method_deconvolution='oasis')
comp.cnmpatch = copy.copy(cnm)
cnm = cnm.fit(images)

A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
f_tot = cnm.f
sn_tot = cnm.sn
comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
comp.comparison['cnmf_on_patch']['ourdata'] = [cnm.A.copy(), cnm.C.copy()]
print(('Number of components:' + str(A_tot.shape[-1])))
# %%
def run():
    date_recorded = '190503'
    mouse_id = 'M439939'
    play_movie = False
    resolution = 512
    channel = 'green'

    data_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data\{}-{}-2p" \
                  r"\110_LSNDGC_reorged".format(date_recorded, mouse_id)

    curr_folder = os.path.dirname(os.path.realpath(__file__))

    plane_ns = [
        f for f in os.listdir(data_folder)
        if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'
    ]
    plane_ns.sort()
    print('planes:')
    print('\n'.join(plane_ns))

    # %% start cluster
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=6,
                                                     single_thread=False)

    for plane_n in plane_ns:

        print('\nsegmenting plane: {}'.format(plane_n))

        plane_folder = os.path.join(data_folder, plane_n, channel, 'corrected')
        os.chdir(plane_folder)

        fn = [f for f in os.listdir(plane_folder) if f[-5:] == '.mmap']
        if len(fn) > 1:
            print('\n'.join(fn))
            raise LookupError('more than one file found.')
        elif len(fn) == 0:
            raise LookupError('no file found.')
        else:
            fn = fn[0]

        fn_parts = fn.split('_')
        d1 = int(fn_parts[fn_parts.index('d1') + 1])  # column, x
        d2 = int(fn_parts[fn_parts.index('d2') + 1])  # row, y
        d3 = int(fn_parts[fn_parts.index('d3') + 1])  # channel
        d4 = int(fn_parts[fn_parts.index('frames') + 1])  # frame, T
        order = fn_parts[fn_parts.index('order') + 1]

        print('playing {} ...'.format(fn))

        mov = np.memmap(filename=fn,
                        shape=(d1, d2, d4),
                        order=order,
                        dtype=np.float32,
                        mode='r')
        mov = mov.transpose((2, 1, 0))

        print('shape of joined movie: {}.'.format(mov.shape))

        #%% play movie, press q to quit
        if play_movie:
            cm.movie(mov).play(fr=50, magnification=1, gain=2.)

        #%% movie cannot be negative!
        mov_min = float(np.amin(mov))
        print('minimum pixel value: {}.'.format(mov_min))
        if mov_min < 0:
            raise Exception(
                'Movie too negative, add_to_movie should be larger')

        #%% correlation image. From here infer neuron size and density
        Cn = cm.movie(mov).local_correlations(swap_dim=False)
        # plt.imshow(Cn, cmap='gray')
        # plt.show()

        K = 100  # number of neurons expected per patch
        gSig = [5, 5]  # expected half size of neurons
        merge_thresh = 0.9  # merging threshold, max correlation allowed
        p = 2  # order of the autoregressive system
        cnm = cnmf.CNMF(
            n_processes,
            k=10,  # number of neurons expected per patch
            gSig=[5, 5],  # expected half size of neurons
            merge_thresh=0.9,  # merging threshold, max correlation allowed
            p=2,  # order of the autoregressive system
            dview=dview,
            Ain=None,
            method_deconvolution='oasis',
            rolling_sum=False,
            method_init='sparse_nmf',
            alpha_snmf=10e1,
            ssub=1,
            tsub=1,
            p_ssub=1,
            p_tsub=1,
            rf=int(resolution / 2),  # half-size of the patches in pixels
            border_pix=0,
            do_merge=False)
        cnm = cnm.fit(mov)
        A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
        #%%
        # crd = cm.utils.visualization.plot_contours(cnm.A, Cn)
        # plt.show()
        # input("Press enter to continue ...")

        roi_num = cnm.A.shape[1]
        save_fn = h5py.File('caiman_segmentation_results.hdf5', 'a')
        bias = save_fn['bias_added_to_movie'][()]
        save_fn['masks'] = np.array(cnm.A.todense()).T.reshape(
            (roi_num, 512, 512), order='F')
        save_fn['traces'] = cnm.C - bias
        save_fn.close()

        copyfile(
            os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'),
            os.path.join(curr_folder, plane_n,
                         'caiman_segmentation_results.hdf5'))

        plt.close('all')
def perform_cnmf(video, params, roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints, use_multiprocessing=True):
    if use_multiprocessing:
        backend = 'multiprocessing'

        cm.stop_server()
        c, dview, n_processes = cm.cluster.setup_cluster(backend=backend, n_processes=None, single_thread=False)
    else:
        dview = None

    # print("~")
    # print(roi_spatial_footprints.shape)

    video_path = "video_temp.tif"
    tifffile.imsave(video_path, video)

    # dataset dependent parameters
    fnames         = [video_path]          # filename to be processed
    fr             = params['imaging_fps'] # imaging rate in frames per second
    decay_time     = params['decay_time']  # length of a typical transient in seconds
    
    # parameters for source extraction and deconvolution
    p              = params['autoregressive_order']             # order of the autoregressive system
    gnb            = params['num_bg_components']                # number of global background components
    merge_thresh   = params['merge_threshold']                  # merging threshold, max correlation allowed
    rf             = None                                       # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf    = 6                                          # amount of overlap between the patches in pixels
    K              = roi_spatial_footprints.shape[1]                   # number of components per patch
    gSig           = [params['half_size'], params['half_size']] # expected half size of neurons
    init_method    = 'greedy_roi'                               # initialization method (if analyzing dendritic data using 'sparse_nmf')
    rolling_sum    = True
    rolling_length = 50
    is_dendrites   = False                                      # flag for analyzing dendritic data
    alpha_snmf     = None                                       # sparsity penalty for dendritic data analysis through sparse NMF

    # parameters for component evaluation
    min_SNR        = params['min_snr']          # signal to noise ratio for accepting a component
    rval_thr       = params['min_spatial_corr'] # space correlation threshold for accepting a component
    cnn_thr        = params['cnn_threshold']    # threshold for CNN based classifier

    border_pix = 0

    fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C') # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')


    cnm = cnmf.CNMF(n_processes=8, k=K, gSig=gSig, merge_thresh= merge_thresh, 
                    p = p,  dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                    method_init=init_method, alpha_snmf=alpha_snmf, rolling_sum=rolling_sum,
                    only_init_patch = True, skip_refinement=True, gnb = gnb, border_pix = border_pix, ssub=1, ssub_B=1, tsub=1, Ain=roi_spatial_footprints, Cin=roi_temporal_footprints, b_in=bg_spatial_footprints, f_in=bg_temporal_footprints, do_merge=False)

    cnm = cnm.fit(images)

    roi_spatial_footprints  = cnm.A
    roi_temporal_footprints = cnm.C
    roi_temporal_residuals  = cnm.YrA
    bg_spatial_footprints   = cnm.b
    bg_temporal_footprints  = cnm.f

    if use_multiprocessing:
        if backend == 'multiprocessing':
            dview.close()
        else:
            try:
                dview.terminate()
            except:
                dview.shutdown()
        cm.stop_server()

    return roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints
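
A hedged driver sketch for perform_cnmf; the movie and the previously extracted footprints below are random placeholders standing in for the outputs of an earlier extraction pass.

# Hedged usage sketch; every input is a placeholder for the results of a previous extraction run.
import numpy as np

T_frames, height, width, n_rois, n_bg = 100, 64, 64, 5, 2
video = np.random.rand(T_frames, height, width).astype(np.float32)   # placeholder movie
roi_spatial_footprints = np.random.rand(height * width, n_rois)      # placeholder A
roi_temporal_footprints = np.random.rand(n_rois, T_frames)           # placeholder C
roi_temporal_residuals = np.zeros((n_rois, T_frames))                # placeholder YrA
bg_spatial_footprints = np.random.rand(height * width, n_bg)         # placeholder b
bg_temporal_footprints = np.random.rand(n_bg, T_frames)              # placeholder f

params = {'imaging_fps': 30, 'decay_time': 0.4, 'autoregressive_order': 1,
          'num_bg_components': n_bg, 'merge_threshold': 0.85, 'half_size': 4,
          'min_snr': 2.0, 'min_spatial_corr': 0.8, 'cnn_threshold': 0.9}

(roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals,
 bg_spatial_footprints, bg_temporal_footprints) = perform_cnmf(
    video, params, roi_spatial_footprints, roi_temporal_footprints,
    roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints,
    use_multiprocessing=False)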
Example #17
        raise Exception('dendritic requires sparse_nmf')
    if params_movie['alpha_snmf'] is None:
        raise Exception('need to set a value for alpha_snmf')
# %% Extract spatial and temporal components on patches
t1 = time.time()
border_pix = 0  # if motion correction introduces problems at the border remove pixels from border
# TODO: todocument
# TODO: warnings 3
cnm = cnmf.CNMF(n_processes=1,
                k=K,
                gSig=gSig,
                merge_thresh=params_movie['merge_thresh'],
                p=params_movie['p'],
                dview=dview,
                rf=rf,
                stride=stride_cnmf,
                memory_fact=1,
                method_init=init_method,
                alpha_snmf=alpha_snmf,
                only_init_patch=params_movie['only_init_patch'],
                gnb=params_movie['gnb'],
                method_deconvolution='oasis',
                border_pix=border_pix,
                low_rank_background=params_movie['low_rank_background'])
cnm = cnm.fit(images)

A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
f_tot = cnm.f
sn_tot = cnm.sn
    # %% Extract spatial and temporal components
    # TODO: todocument
    t1 = time.time()
    if images.shape[0] > 10000:
        check_nan = False
    else:
        check_nan = True

    cnm = cnmf.CNMF(check_nan=check_nan,
                    n_processes=1,
                    k=A_in.shape[-1],
                    gSig=[radius, radius],
                    merge_thresh=params_movie['merge_thresh'],
                    p=params_movie['p'],
                    Ain=A_in.astype(bool),
                    dview=dview,
                    rf=None,
                    stride=None,
                    gnb=params_movie['gnb'],
                    method_deconvolution='oasis',
                    border_pix=0,
                    low_rank_background=params_movie['low_rank_background'],
                    n_pixels_per_process=1000)
    cnm = cnm.fit(images)

    A = cnm.A
    C = cnm.C
    YrA = cnm.YrA
    b = cnm.b
    f = cnm.f
    sn = cnm.sn
Example #19
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% start a cluster

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

    #%% save files to be processed

    # This datafile is distributed with Caiman
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]
    # location of dataset (can actually be a list of files to be concatenated)
    add_to_movie = -np.min(cm.load(fnames[0],
                                   subindices=range(200))).astype(float)
    # determine minimum value on a small chunk of data
    add_to_movie = np.maximum(add_to_movie, 0)
    # if minimum is negative subtract to make the data non-negative
    base_name = 'Yr'
    fname_new = cm.save_memmap(fnames,
                               dview=dview,
                               base_name=base_name,
                               order='C',
                               add_to_movie=add_to_movie)

    #%% LOAD MEMORY MAPPABLE FILE
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    #%% play movie, press q to quit
    play_movie = False
    if play_movie:
        cm.movie(images[1400:]).play(fr=50, magnification=4, gain=3.)

#%% correlation image. From here infer neuron size and density
    Cn = cm.movie(images).local_correlations(swap_dim=False)
    plt.imshow(Cn, cmap='gray')
    plt.title('Correlation Image')

    #%% set up some parameters

    is_patches = True  # flag for processing in patches or not

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 4  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = 30  # number of neurons expected (in the whole FOV)

    gSig = [6, 6]  # expected half size of neurons
    merge_thresh = 0.80  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    #%% Now RUN CNMF
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    gnb=gnb,
                    rf=rf,
                    stride=stride,
                    rolling_sum=False)
    cnm = cnm.fit(images)

    #%% plot contour plots of components

    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
    plt.title('Contour plots of components')

    #%%
    A_in, C_in, b_in, f_in = cnm.A[:, :], cnm.C[:], cnm.b, cnm.f
    cnm2 = cnmf.CNMF(n_processes=1,
                     k=A_in.shape[-1],
                     gSig=gSig,
                     p=p,
                     dview=dview,
                     merge_thresh=merge_thresh,
                     Ain=A_in,
                     Cin=C_in,
                     b_in=b_in,
                     f_in=f_in,
                     rf=None,
                     stride=None,
                     gnb=gnb,
                     method_deconvolution='oasis',
                     check_nan=True)

    cnm2 = cnm2.fit(images)
    #%% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)
    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient
    min_SNR = 2.5  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.90  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.95  # if cnn classifier predicts below this value, reject

    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                         cnm.YrA, fr, decay_time, gSig, dims,
                                         dview=dview, min_SNR=min_SNR,
                                         r_values_min=rval_thr, use_cnn=use_cnn,
                                         thresh_cnn_min=min_cnn_thr)
    #%% visualize selected and rejected components
    plt.figure()
    plt.subplot(1, 2, 1)
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components],
                                         Cn,
                                         thr=0.9)
    plt.title('Selected components')
    plt.subplot(1, 2, 2)
    plt.title('Discarded components')
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components_bad],
                                         Cn,
                                         thr=0.9)

    #%%
    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm2.A.tocsc()[:,
                                                              idx_components],
                                               Cn,
                                               thr=0.9)
    plt.title('Contour plots of components')
    #%% visualize selected components
    cm.utils.visualization.view_patches_bar(Yr,
                                            cnm2.A.tocsc()[:, idx_components],
                                            cnm2.C[idx_components, :],
                                            cnm2.b,
                                            cnm2.f,
                                            dims[0],
                                            dims[1],
                                            YrA=cnm2.YrA[idx_components, :],
                                            img=Cn)
    #%% STOP CLUSTER and clean up log files
    cm.stop_server()

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Beispiel #20
0
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %% start a cluster

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

    # %% set up some parameters
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]
    # file(s) to be analyzed
    is_patches = True  # flag for processing in patches or not
    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 4  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = 30  # number of neurons expected (in the whole FOV)

    gSig = [6, 6]  # expected half size of neurons
    merge_thresh = 0.80  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    params_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'rf': rf,
        'stride': stride,
        'K': K,
        'gSig': gSig,
        'merge_thr': merge_thresh,
        'p': p,
        'nb': gnb
    }

    opts = params.CNMFParams(params_dict=params_dict)
    # %% Now RUN CaImAn Batch (CNMF)
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit_file()

    # %% plot contour plots of components
    Cns = local_correlations_movie_offline(fnames[0],
                                           remove_baseline=True,
                                           swap_dim=False,
                                           window=1000,
                                           stride=1000,
                                           winSize_baseline=100,
                                           quantil_min_baseline=10,
                                           dview=dview)
    Cn = Cns.max(axis=0)
    cnm.estimates.plot_contours(img=Cn)

    # %% load memory mapped file
    Yr, dims, T = cm.load_memmap(cnm.mmap_file)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    # %% refit
    cnm2 = cnm.refit(images, dview=dview)

    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)

    min_SNR = 2  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.85  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.99  # if cnn classifier predicts below this value, reject
    cnn_lowest = 0.1  # neurons with cnn probability lower than this value are rejected

    cnm2.params.set(
        'quality', {
            'min_SNR': min_SNR,
            'rval_thr': rval_thr,
            'use_cnn': use_cnn,
            'min_cnn_thr': min_cnn_thr,
            'cnn_lowest': cnn_lowest
        })

    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

    # %% visualize selected and rejected components
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
    # %% visualize selected components
    cnm2.estimates.view_components(images,
                                   idx=cnm2.estimates.idx_components,
                                   img=Cn)
    #%% only select high quality components (destructive)
    # cnm2.estimates.select_components(use_object=True)
    # cnm2.estimates.plot_contours(img=Cn)
    #%% save results
    cnm2.estimates.Cn = Cn
    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5')

    # %% play movie with results (original, reconstructed, amplified residual)
    cnm2.estimates.play_movie(images, magnification=4)

    # %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Beispiel #21
0
# %% Extract spatial and temporal components on patches
# TODO: todocument
if images.shape[0] > 10000:
    check_nan = False
else:
    check_nan = True

cnm = cnmf.CNMF(check_nan=check_nan,
                n_processes=1,
                k=A_in.shape[-1],
                gSig=[radius, radius],
                merge_thresh=params_movie['merge_thresh'],
                p=params_movie['p'],
                Ain=A_in.astype(bool),
                dview=dview,
                rf=None,
                stride=None,
                gnb=params_movie['gnb'],
                method_deconvolution='oasis',
                border_pix=0,
                low_rank_background=params_movie['low_rank_background'],
                n_pixels_per_process=1000)
cnm = cnm.fit(images)

A = cnm.A
C = cnm.C
YrA = cnm.YrA
b = cnm.b
f = cnm.f
sn = cnm.sn
Beispiel #22
0
def cnmf_patches(args_in):
    import numpy as np
    import caiman as cm
    import time
    import logging
    from caiman.source_extraction.cnmf import cnmf as cnmf

    #    file_name, idx_,shapes,p,gSig,K,fudge_fact=args_in
    file_name, idx_, shapes, options = args_in

    name_log = os.path.basename(file_name[:-5]) + '_LOG_ ' + str(
        idx_[0]) + '_' + str(idx_[-1])
    logger = logging.getLogger(name_log)
    hdlr = logging.FileHandler('./' + name_log)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)

    p = options['temporal_params']['p']

    logger.info('START')

    logger.info('Read file')
    Yr, _, _ = load_memmap(file_name)

    Yr = Yr[idx_, :]

    if (np.sum(np.abs(np.diff(Yr)))) > 0.1:

        #Yr.filename=file_name
        d, T = Yr.shape

        #        Y=np.reshape(Yr,(shapes[1],shapes[0],T),order='F')
        #        Y.filename=file_name

        #        dims = shapes[1],shapes[0]
        dims = shapes  #shapes[1],shapes[0],shapes[2]
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        #images.filename=file_name

        cnm = cnmf.CNMF(n_processes=1, k=options['init_params']['K'],
                        gSig=options['init_params']['gSig'],
                        merge_thresh=options['merging']['thr'], p=p, dview=None,
                        Ain=None, Cin=None, f_in=None, do_merge=True,
                        ssub=options['init_params']['ssub'],
                        tsub=options['init_params']['tsub'], p_ssub=1, p_tsub=1,
                        method_init=options['init_params']['method'],
                        alpha_snmf=options['init_params']['alpha_snmf'],
                        rf=None, stride=None, memory_fact=1,
                        gnb=options['init_params']['nb'],
                        only_init_patch=options['patch_params']['only_init'],
                        method_deconvolution=options['temporal_params']['method'],
                        n_pixels_per_process=options['preprocess_params']['n_pixels_per_process'],
                        block_size=options['temporal_params']['block_size'],
                        check_nan=options['preprocess_params']['check_nan'],
                        skip_refinement=options['patch_params']['skip_refinement'],
                        N_iterations_refinement=options['patch_params']['nIter'])

        cnm = cnm.fit(images)

        Yr = []
        images = []

        return idx_, shapes, scipy.sparse.coo_matrix(
            cnm.A
        ), cnm.b, cnm.C, cnm.f, cnm.S, cnm.bl, cnm.c1, cnm.neurons_sn, cnm.g, cnm.sn, cnm.options, cnm.YrA.T

#        [d1,d2,T]=Y.shape
#
#        options['spatial_params']['dims']=(d1,d2)
#        logger.info('Preprocess Data')
#        Yr,sn,g,psx=cm.source_extraction.cnmf.pre_processing.preprocess_data(Yr,**options['preprocess_params'])
#
#
#        logger.info('Initialize Components')
#
#        Ain, Cin, b_in, f_in, center=cm.source_extraction.cnmf.initialization.initialize_components(Y, **options['init_params'])
#
#        nA = np.squeeze(np.array(np.sum(np.square(Ain),axis=0)))
#
#        nr=nA.size
#        Cin=coo_matrix(Cin)
#
#
#        YA = (Ain.T.dot(Yr).T)*scipy.sparse.spdiags(old_div(1.,nA),0,nr,nr)
#        AA = ((Ain.T.dot(Ain))*scipy.sparse.spdiags(old_div(1.,nA),0,nr,nr))
#        YrA = YA - Cin.T.dot(AA)
#        Cin=Cin.todense()
#
#        if options['patch_params']['only_init']:
#
#            return idx_,shapes, coo_matrix(Ain), b_in, Cin, f_in, None, None , None, None, g, sn, options, YrA.T
#
#        else:
#
#            raise Exception('Bug here, need to double check. For now set ["patch_params"]["only_init"] = True')
#            logger.info('Spatial Update')
#            A,b,Cin, f_in = cm.source_extraction.cnmf.spatial.update_spatial_components(Yr, Cin, f_in, Ain, sn=sn, **options['spatial_params'])
#            options['temporal_params']['p'] = 0 # set this to zero for fast updating without deconvolution
#
#            import pdb
#            pdb.set_trace()
#
#            logger.info('Temporal Update')
#            C,f,S,bl,c1,neurons_sn,g,YrA = cm.source_extraction.cnmf.temporal.update_temporal_components(Yr,A,b,Cin,f_in,bl=None,c1=None,sn=None,g=None,**options['temporal_params'])
#
#            logger.info('Merge Components')
#            A_m,C_m,nr_m,merged_ROIs,S_m,bl_m,c1_m,sn_m,g_m=cm.source_extraction.cnmf.merging.merge_components(Yr,A,b,C,f,S,sn,options['temporal_params'], options['spatial_params'], bl=bl, c1=c1, sn=neurons_sn, g=g, thr=options['merging']['thr'], fast_merge = True)
#
#            logger.info('Update Spatial II')
#            A2,b2,C2,f = cm.source_extraction.cnmf.spatial.update_spatial_components(Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])
#
#            logger.info('Update Temporal II')
#            options['temporal_params']['p'] = p # set it back to original value to perform full deconvolution
#            C2,f2,S2,bl2,c12,neurons_sn2,g21,YrA = cm.source_extraction.cnmf.temporal.update_temporal_components(Yr,A2,b2,C2,f,bl=None,c1=None,sn=None,g=None,**options['temporal_params'])
#
#
#            Y=[]
#            Yr=[]
#
#            logger.info('Done!')
#            return idx_,shapes,A2,b2,C2,f2,S2,bl2,c12,neurons_sn2,g21,sn,options,YrA

    else:
        return None
Beispiel #23
0
    raise Exception('Movie contains nan! You did not remove enough borders')
#%%
Cn = cm.local_correlations(Y[:, :, :3000])
pl.imshow(Cn, cmap='gray')

#%%
if not is_patches:
    K = 35  # number of neurons expected per patch
    gSig = [7, 7]  # expected half size of neurons
    merge_thresh = 0.8  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    cnm = cnmf.CNMF(n_processes,
                    method_init=init_method,
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    Ain=None,
                    method_deconvolution='oasis',
                    skip_refinement=False)
    cnm = cnm.fit(images)
    crd = plot_contours(cnm.A, Cn, thr=0.9)
    C_dff = extract_DF_F(Yr,
                         cnm.A,
                         cnm.C,
                         cnm.bl,
                         quantileMin=8,
                         frames_window=200,
                         dview=dview)
    pl.figure()
    pl.plot(C_dff.T)
Beispiel #24
0
def demix_and_deconvolve_with_cnmf(scan, num_components=200, AR_order=2,
                                   merge_threshold=0.8, num_processes=20,
                                   num_pixels_per_process=5000, block_size=10000,
                                   num_background_components=4, init_method='greedy_roi',
                                   soma_radius=(5, 5), snmf_alpha=None,
                                   init_on_patches=False, patch_downsampling_factor=None,
                                   percentage_of_patch_overlap=None):
    """ Extract spike train activity from multi-photon scans using CNMF.

    Uses constrained non-negative matrix factorization to find neurons/components
    (locations) and their fluorescence traces (activity) in a timeseries of images, and
    deconvolves them using an autoregressive model of the calcium impulse response
    function. See Pnevmatikakis et al., 2016 for details.

    Default values work alright for somatic images.

    :param np.array scan: 3-dimensional scan (image_height, image_width, num_frames).
    :param int num_components: An estimate of neurons/spatial components in the scan.
    :param int AR_order: Order of the autoregressive process used to model the impulse
        response function, e.g., 0 = no modelling; 2 = model rise plus exponential decay.
    :param float merge_threshold: Maximal temporal correlation allowed between activity of
        overlapping components before merging them.
    :param int num_processes: Number of processes to run in parallel. None for as many
        processes as available cores.
    :param int num_pixels_per_process: Number of pixels that a process handles each
        iteration.
    :param int block_size: 'number of pixels to process at the same time for dot product'
    :param int num_background_components:  Number of background components to use.
    :param string init_method: Initialization method for the components.
        'greedy_roi': Look for a gaussian-shaped patch, apply rank-1 NMF, store components,
            calculate residual scan and repeat for num_components.
        'sparse_nmf': Regularized non-negative matrix factorization (as impl. in sklearn)
        'local_nmf': ...
    :param (float, float) soma_radius: Estimated neuron radius (in pixels) in y and x.
        Used in 'greedy_roi' initialization to define the size of the gaussian window.
    :param float snmf_alpha: Regularization parameter (alpha) for the sparse NMF (if used).
    :param bool init_on_patches: If True, run the initialization methods on small patches
        of the scan rather than on the whole image.
    :param int patch_downsampling_factor: Division to the image dimensions to obtain patch
        dimensions, e.g., if original size is 256 and factor is 10, patches will be 26x26
    :param float percentage_of_patch_overlap: Patches are sampled in a sliding window. This
        controls how much overlap is between adjacent patches (0 for none, 0.9 for 90%)

    :returns Location matrix (image_height x image_width x num_components). Inferred
        location of each component.
    :returns Activity matrix (num_components x num_frames). Inferred fluorescence traces
         (spike train convolved with the fitted impulse response function).
    :returns: Inferred location matrix for background components (image_height x
         image_width x num_background_components).
    :returns: Inferred activity matrix for background components (image_height x
        image_width x num_background_components).
    :returns: Raw fluorescence traces (num_components x num_frames) obtained from the
        scan minus activity from background and other components.
    :returns: Spike matrix (num_components x num_frames). Deconvolved spike activity.
    :returns: Autoregressive process coefficients (num_components x AR_order) used to
        model the calcium impulse response of each component:
            c(t) = c(t-1) * AR_coeffs[0] + c(t-2) * AR_coeffs[1] + ...

    ..note:: Based on code provided by Andrea Giovanucci.
    ..note:: The produced number of components is not exactly what you ask for because
        some components will be merged or deleted.
    ..warning:: Computation- and memory-intensive for big scans.
    """
    import caiman
    from caiman.source_extraction.cnmf import cnmf

    # Save as memory mapped file in F order (that's how caiman wants it)
    mmap_filename = _save_as_memmap(scan, base_name='/tmp/caiman', order='F').filename

    # 'Load' scan
    mmap_scan, (image_height, image_width), num_frames = caiman.load_memmap(mmap_filename)
    images = np.reshape(mmap_scan.T, (num_frames, image_height, image_width), order='F')

    # Start the ipyparallel cluster
    client, direct_view, num_processes = caiman.cluster.setup_cluster(
        n_processes=num_processes)

    # Optionally, run the initialization method in small patches to initialize components
    initial_A = None
    initial_C = None
    initial_f = None
    if init_on_patches:
        # Calculate patch size (only square patches allowed)
        bigger_dimension = max(image_height, image_width)
        smaller_dimension = min(image_height, image_width)
        patch_size = bigger_dimension / patch_downsampling_factor
        patch_size = min(patch_size, smaller_dimension) # if bigger than small dimension

        # Calculate num_components_per_patch
        num_nonoverlapping_patches = (image_height/patch_size) * (image_width/patch_size)
        num_components_per_patch = num_components / num_nonoverlapping_patches
        num_components_per_patch = max(num_components_per_patch, 1) # at least 1

        # Calculate patch overlap in pixels
        overlap_in_pixels = patch_size * percentage_of_patch_overlap

        # Make sure they are integers
        patch_size = int(round(patch_size))
        num_components_per_patch = int(round(num_components_per_patch))
        overlap_in_pixels = int(round(overlap_in_pixels))

        # Run CNMF on patches (only for initialization, no impulse response modelling p=0)
        model = cnmf.CNMF(num_processes, only_init_patch=True, p=0,
                          rf=int(round(patch_size / 2)), stride=overlap_in_pixels,
                          k=num_components_per_patch, merge_thresh=merge_threshold,
                          method_init=init_method, gSig=soma_radius,
                          alpha_snmf=snmf_alpha, gnb=num_background_components,
                          n_pixels_per_process=num_pixels_per_process,
                          block_size=block_size, check_nan=False, dview=direct_view,
                          method_deconvolution='cvxpy')
        model = model.fit(images)

        # Delete log files (one per patch)
        log_files = glob.glob('caiman*_LOG_*')
        for log_file in log_files:
            os.remove(log_file)

        # Get results
        initial_A = model.A
        initial_C = model.C
        initial_f = model.f

    # Run CNMF
    model = cnmf.CNMF(num_processes, k=num_components, p=AR_order,
                      merge_thresh=merge_threshold, gnb=num_background_components,
                      method_init=init_method, gSig=soma_radius, alpha_snmf=snmf_alpha,
                      n_pixels_per_process=num_pixels_per_process, block_size=block_size,
                      check_nan=False, dview=direct_view, Ain=initial_A, Cin=initial_C,
                      f_in=initial_f, method_deconvolution='cvxpy')
    model = model.fit(images)

    # Get final results
    location_matrix = model.A  # pixels x num_components
    activity_matrix = model.C  # num_components x num_frames
    background_location_matrix = model.b  # pixels x num_background_components
    background_activity_matrix = model.f  # num_background_components x num_frames
    spikes = model.S  # num_components x num_frames, spike traces
    raw_traces = model.C + model.YrA  # num_components x num_frames
    AR_coefficients = model.g  # AR_order x num_components

    # Reshape spatial matrices to be image_height x image_width x num_frames
    new_shape = (image_height, image_width, -1)
    location_matrix = location_matrix.toarray().reshape(new_shape, order='F')
    background_location_matrix = background_location_matrix.reshape(new_shape, order='F')
    AR_coefficients = np.array(list(AR_coefficients))  # unwrapping it (num_components x 2)

    # Stop ipyparallel cluster
    client.close()
    caiman.stop_server()

    # Delete memory mapped scan
    os.remove(mmap_filename)

    return (location_matrix, activity_matrix, background_location_matrix,
            background_activity_matrix, raw_traces, spikes, AR_coefficients)
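# A minimal usage sketch for the function above (not part of the original
# example). The scan shape, every parameter value, and the single-component
# trace reconstruction below are illustrative assumptions; only the call
# signature, the return order, and the AR recursion are taken from the
# docstring.
import numpy as np

scan = np.random.rand(256, 256, 1000).astype(np.float32)  # hypothetical movie, height x width x frames

(location_matrix, activity_matrix, background_location_matrix,
 background_activity_matrix, raw_traces, spikes, AR_coefficients) = \
    demix_and_deconvolve_with_cnmf(scan, num_components=150, AR_order=2,
                                   num_processes=4, init_on_patches=True,
                                   patch_downsampling_factor=10,
                                   percentage_of_patch_overlap=0.2)

# The AR coefficients model the calcium impulse response of each component,
# c(t) = c(t-1) * AR_coeffs[0] + c(t-2) * AR_coeffs[1] (driven by the deconvolved
# spikes), so a denoised trace can be re-synthesized from one spike train:
g = AR_coefficients[0]                 # coefficients of the first component
c = np.zeros(spikes.shape[1])
for t in range(2, c.size):
    c[t] = g[0] * c[t - 1] + g[1] * c[t - 2] + spikes[0, t]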
Beispiel #25
0
        'ssub': global_params['ssub'],
        'tsub': global_params['tsub'],
        'thr_method': 'nrg'
    }

    init_method = global_params['init_method']

    opts = params.CNMFParams(params_dict=params_dict)
    if reload:
        cnm2 = load_CNMF(fname_new[:-5] + '_cnmf_gsig.hdf5')

    else:
        # %% Extract spatial and temporal components on patches
        t1 = time.time()
        print('Starting CNMF')
        cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
        cnm = cnm.fit(images)
        t_patch = time.time() - t1
        # %%
        try:
            dview.terminate()
        except:
            pass
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend=backend_refine,
            n_processes=n_processes,
            single_thread=False)
        # %%
        if plot_on:
            cnm.estimates.plot_contours(img=Cn)
Beispiel #26
0
    if np.min(images) < 0:
        raise Exception('Movie too negative, add_to_movie should be larger')

    #%% correlation image. From here infer neuron size and density

    Cn = cm.movie(images)[:3000].local_correlations(swap_dim=False)

    #%% run  seeded CNMF

    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=Ain.shape[1],
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    Ain=Ain,
                    method_deconvolution='oasis',
                    rolling_sum=False,
                    rf=None)
    cnm = cnm.fit(images)
    A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn

    #%% plot contours of components

    pl.figure()
    crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
    pl.title('Contour plots against correlation image')

    #%% evaluate the quality of the components
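    # The original snippet is truncated at this point. What follows is a sketch of
    # the evaluation step, mirroring the estimate_components_quality_auto call used
    # in Beispiel #19; the import and the names fr, decay_time, gSig and dims are
    # assumed to come from the omitted part of this example, and the thresholds
    # are illustrative.
    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, A, C, b, f, YrA, fr,
                                         decay_time, gSig, dims, dview=dview,
                                         min_SNR=2.5, r_values_min=0.9,
                                         use_cnn=True, thresh_cnn_min=0.95)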
Beispiel #27
0
def test_general():
    """General test of the pipeline with comparison against ground truth.

    A shortened version of the demo pipeline that calls the comparison
    machinery to do the real test work.

    Raises:
        An assertion or exception when any of the following differ from the
        stored ground truth: params_movie, params_cnmf, the rigid correction,
        the cnmf on patch results, or the cnmf full frame results; also when
        the file cannot be read or when no ground truth is available.
    """
    #\bug
    #\warning

    global params_movie
    global params_diplay
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']

    cwd = os.getcwd()
    fname = download_demo(fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]

    ################ RIG CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname,
                       min_mov,
                       max_shifts=max_shifts,
                       niter_rig=niter_rig,
                       splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True,
                       nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
    ###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:  # elif not params_movie.has_key('overlaps'):
        fnames = mc.fname_tot_rig
        border_to_0 = bord_px_rig
        m_els = m_rig

    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname[0].split('/')[-1][:-4]
    name_new = cm.save_memmap_each(fnames,
                                   base_name=base_name,
                                   resize_fact=(1, 1, downsample_factor),
                                   remove_init=remove_init,
                                   idx_xy=idx_xy,
                                   add_to_movie=add_to_movie,
                                   border_to_0=border_to_0)
    name_new.sort()

    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(name_new,
                                        base_name='Yr',
                                        n_chunks=params_movie['n_chunks'],
                                        dview=None)
    else:
        logging.warning('One file only, not saving!')
        fname_new = name_new[0]

    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T, ), order='F')

    if np.min(images) < 0:
        # TODO: should do this in an automatic fashion with a while loop at the 367 line
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception(
            'Movie contains nan! You did not remove enough borders')

    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0
    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']

    if params_movie['is_dendrites']:
        if params_movie['init_method'] != 'sparse_nmf':
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')

################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1,
                    k=K,
                    gSig=gSig,
                    merge_thresh=params_movie['merge_thresh'],
                    p=params_movie['p'],
                    dview=None,
                    rf=rf,
                    stride=stride_cnmf,
                    memory_fact=params_movie['memory_fact'],
                    method_init=init_method,
                    alpha_snmf=alpha_snmf,
                    only_init_patch=params_movie['only_init_patch'],
                    gnb=params_movie['gnb'],
                    method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    comp.cnmpatch.estimates = None
    cnm = cnm.fit(images)
    A_tot = cnm.estimates.A
    C_tot = cnm.estimates.C
    YrA_tot = cnm.estimates.YrA
    b_tot = cnm.estimates.b
    f_tot = cnm.estimates.f
    # DISCARDING
    logging.info(('Number of components:' + str(A_tot.shape[-1])))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces,
        Y,
        A_tot,
        C_tot,
        b_tot,
        f_tot,
        final_frate=final_frate,
        Npeaks=Npeaks,
        r_values_min=r_values_min,
        fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    #######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
    #################### ########################

    ################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1,
                    k=A_tot.shape[-1],
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    Ain=A_tot,
                    Cin=C_tot,
                    f_in=f_tot,
                    rf=None,
                    stride=None,
                    method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f, cnm.estimates.YrA, cnm.estimates.sn
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces,
        Y,
        A,
        C,
        b,
        f,
        final_frate=final_frate,
        Npeaks=Npeaks,
        r_values_min=r_values_min,
        fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min,
        return_all=True)
    ##########
    A_tot_full = A_tot.tocsc()[:, idx_components]
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [
        A_tot_full.copy(), C_tot_full.copy()
    ]
    #################### ########################
    comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    try:
        for log_file in log_files:
            os.remove(log_file)
    except:
        logging.warning('Cannot remove log files')
############ assertions ##################
    pb = False
    if (comp.information['differences']['params_movie']):
        logging.error(
            "you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)"
        )
        pb = True
    if (comp.information['differences']['params_cnm']):
        logging.warning(
            "you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)"
        )
        # pb = True
    if (comp.information['diff']['rig']['isdifferent']):
        logging.error("the rigid shifts are different from the groundtruth ")
        pb = True
    if (comp.information['diff']['cnmpatch']['isdifferent']):
        logging.error(
            "the cnmf on patch produces different results than the groundtruth "
        )
        pb = True
    if (comp.information['diff']['cnmfull']['isdifferent']):
        logging.error(
            "the cnmf full frame produces different  results than the groundtruth "
        )
        pb = True

    assert (not pb)
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)

#%% RUN CNMF ON PATCHES
# First extract spatial and temporal components on patches and combine them
# for this step deconvolution is turned off (p=0)
t1 = time.time()

cnm = cnmf.CNMF(n_processes=1,
                k=K,
                gSig=gSig,
                merge_thresh=merge_thresh,
                p=0,
                dview=dview,
                rf=rf,
                stride=stride_cnmf,
                memory_fact=1,
                method_init=init_method,
                alpha_snmf=alpha_snmf,
                only_init_patch=False,
                gnb=gnb,
                border_pix=border_to_0)
cnm = cnm.fit(images)

#%% plot contours of found components
#Cn = cm.local_correlations(images.transpose(1, 2, 0))
#Cn[np.isnan(Cn)] = 0
plt.figure()
crd = plot_contours(cnm.A, Cn, thr=0.9)
plt.title('Contour plots of found components')
def main():
    pass # For compatibility between running under Spyder and the CLI

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

# %% set up some parameters
    fnames = [os.path.join(caiman_datadir(), 'split', 'first3000-ch1.tif'),
              os.path.join(caiman_datadir(), 'split', 'second3000-ch1.tif')]

    is_patches = True       # flag for processing in patches or not
    fr = 1.5                  # approximate frame rate of data
    decay_time = 5.0        # length of transient

    if is_patches:          # PROCESS IN PATCHES AND THEN COMBINE
        rf = 20             # half size of each patch
        stride = 4         # overlap between patches
        K = 2               # number of components in each patch
    else:                   # PROCESS THE WHOLE FOV AT ONCE
        rf = None           # setting these parameters to None
        stride = None       # will run CNMF on the whole FOV
        K = 10              # number of neurons expected (in the whole FOV)

    gSig = [6, 6]           # expected half size of neurons
    merge_thresh = 0.80     # merging threshold, max correlation allowed
    p = 2                   # order of the autoregressive system
    gnb = 2                 # global background order

    params_dict = {'fnames': fnames,
                   'fr': fr,
                   'decay_time': decay_time,
                   'rf': rf,
                   'stride': stride,
                   'K': K,
                   'gSig': gSig,
                   'merge_thr': merge_thresh,
                   'p': p,
                   'nb': gnb}

    opts = params.CNMFParams(params_dict=params_dict)
    # %% Now RUN CaImAn Batch (CNMF)
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #cnm.estimates.normalize_components()
    cnm = cnm.fit_file()

    # %% plot contour plots of components
    Cn = cm.load(fnames[0], subindices=slice(1000)).local_correlations(swap_dim=False)
    cnm.estimates.plot_contours(img=Cn)

    # %% load memory mapped file
    Yr, dims, T = cm.load_memmap(cnm.mmap_file)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    # %% refit
    cnm2 = cnm.refit(images, dview=dview)

# %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)

    min_SNR = 2      # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.85     # space correlation threshold (if above this, accept)
    use_cnn = False      # use the CNN classifier
    min_cnn_thr = 0.99  # if cnn classifier predicts below this value, reject
    cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected

    cnm2.params.set('quality', {'min_SNR': min_SNR,
                                'rval_thr': rval_thr,
                                'use_cnn': use_cnn,
                                'min_cnn_thr': min_cnn_thr,
                                'cnn_lowest': cnn_lowest})

    cnm2.estimates.detrend_df_f()
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

    # %% visualize selected and rejected components
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
    # %% visualize selected components
    cnm2.estimates.nb_view_components(images, idx=cnm2.estimates.idx_components, img=Cn)
    cnm2.estimates.view_components(images, idx=cnm2.estimates.idx_components_bad, img=Cn)
    #%% only select high quality components
    cnm2.estimates.select_components(use_object=True)
    #%%
    cnm2.estimates.plot_contours(img=Cn)

    cnm2.estimates.detrend_df_f()
    import pickle
    f = open("/home/david/zebraHorse/df_f_day55.pkl", "wb")
    pickle.dump(cnm2.estimates.F_dff, f)
    f.close()
    
    # %% play movie with results (original, reconstructed, amplified residual)
    for j in range(10):
        cnm2.estimates.play_movie(images, magnification=4.0, frame_range = range(100 * j, 100 * (j + 1)))

    #import time
    #time.sleep(1000)
    
# %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
def find_rois_cnmf(video_path, params, mc_borders=None, use_multiprocessing=True):
    full_video_path = video_path

    directory = os.path.dirname(full_video_path)
    filename  = os.path.basename(full_video_path)

    memmap_video = tifffile.memmap(video_path)
    print(memmap_video.shape)

    if len(memmap_video.shape) == 5:
        num_z = memmap_video.shape[2]
    else:
        num_z = memmap_video.shape[1]

    roi_spatial_footprints  = [ None for i in range(num_z) ]
    roi_temporal_footprints = [ None for i in range(num_z) ]
    roi_temporal_residuals  = [ None for i in range(num_z) ]
    bg_spatial_footprints   = [ None for i in range(num_z) ]
    bg_temporal_footprints  = [ None for i in range(num_z) ]

    # Create the cluster
    if use_multiprocessing:
        if os.name == 'nt':
            backend = 'multiprocessing'
        else:
            backend = 'ipyparallel'

        cm.stop_server()
        c, dview, n_processes = cm.cluster.setup_cluster(backend=backend, n_processes=None, single_thread=False)
    else:
        dview = None

    for z in range(num_z):
        fname = os.path.splitext(filename)[0] + "_masked_z_{}.tif".format(z)

        video_path = os.path.join(directory, fname)

        if len(memmap_video.shape) == 5:
            tifffile.imsave(video_path, memmap_video[:, :, z, :, :].reshape((-1, memmap_video.shape[3], memmap_video.shape[4])))
        else:
            tifffile.imsave(video_path, memmap_video[:, z, :, :])

        # dataset dependent parameters
        fnames         = [video_path]          # filename to be processed
        fr             = params['imaging_fps'] # imaging rate in frames per second
        decay_time     = params['decay_time']  # length of a typical transient in seconds
        
        # parameters for source extraction and deconvolution
        p              = params['autoregressive_order']             # order of the autoregressive system
        gnb            = params['num_bg_components']                # number of global background components
        merge_thresh   = params['merge_threshold']                  # merging threshold, max correlation allowed
        rf             = None                                       # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
        stride_cnmf    = 6                                          # amount of overlap between the patches in pixels
        K              = params['num_components']                   # number of components per patch
        gSig           = [params['half_size'], params['half_size']] # expected half size of neurons
        init_method    = 'greedy_roi'                               # initialization method (if analyzing dendritic data using 'sparse_nmf')
        rolling_sum    = True
        rolling_length = 50
        is_dendrites   = False                                      # flag for analyzing dendritic data
        alpha_snmf     = None                                       # sparsity penalty for dendritic data analysis through sparse NMF

        # parameters for component evaluation
        min_SNR        = params['min_snr']          # signal to noise ratio for accepting a component
        rval_thr       = params['min_spatial_corr'] # space correlation threshold for accepting a component
        cnn_thr        = params['cnn_threshold']    # threshold for CNN based classifier

        if mc_borders is not None:
            border_pix = mc_borders[z]
        else:
            border_pix = 0

        fname_new = cm.save_memmap(fnames, base_name='memmap_z_{}'.format(z), order='C') # exclude borders

        # now load the file
        Yr, dims, T = cm.load_memmap(fname_new)
        d1, d2 = dims
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=merge_thresh,
                        p=p, dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                        method_init=init_method, alpha_snmf=alpha_snmf, rolling_sum=rolling_sum,
                        only_init_patch=False, gnb=gnb, border_pix=border_pix,
                        ssub=1, ssub_B=1, tsub=1)

        cnm = cnm.fit(images)

        try:
            A_in, C_in, b_in, f_in = cnm.A, cnm.C, cnm.b, cnm.f
        except:
            A_in, C_in, b_in, f_in = cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f

        cnm2 = cnmf.CNMF(n_processes=1, k=A_in.shape[-1], gSig=gSig, p=p, dview=dview,
                         merge_thresh=merge_thresh, Ain=A_in, Cin=C_in, b_in=b_in,
                         f_in=f_in, rf=None, stride=None, gnb=gnb,
                         method_deconvolution='oasis', check_nan=True)

        cnm2 = cnm2.fit(images)

        try:
            roi_spatial_footprints[z]  = cnm2.A
            roi_temporal_footprints[z] = cnm2.C
            roi_temporal_residuals[z]  = cnm2.YrA
            bg_spatial_footprints[z]   = cnm2.b
            bg_temporal_footprints[z]  = cnm2.f
        except:
            roi_spatial_footprints[z]  = cnm2.estimates.A
            roi_temporal_footprints[z] = cnm2.estimates.C
            roi_temporal_residuals[z]  = cnm2.estimates.YrA
            bg_spatial_footprints[z]   = cnm2.estimates.b
            bg_temporal_footprints[z]  = cnm2.estimates.f

        if os.path.exists(video_path):
            os.remove(video_path)

    del memmap_video

    if use_multiprocessing:
        if backend == 'multiprocessing':
            dview.close()
        else:
            try:
                dview.terminate()
            except:
                dview.shutdown()
        cm.stop_server()

    mmap_files = glob.glob(os.path.join(directory, "*.mmap"))
    for mmap_file in mmap_files:
        try:
            os.remove(mmap_file)
        except:
            pass

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    return roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints
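# A hypothetical invocation of find_rois_cnmf above (not part of the original
# example), showing the parameter-dictionary keys the function reads. The file
# path and every value below are illustrative assumptions.
example_params = {
    'imaging_fps': 30.0,            # imaging rate in frames per second (fr)
    'decay_time': 0.4,              # typical transient length in seconds
    'autoregressive_order': 2,      # p
    'num_bg_components': 2,         # gnb
    'merge_threshold': 0.8,         # merge_thresh
    'num_components': 100,          # K
    'half_size': 4,                 # gSig = [half_size, half_size]
    'min_snr': 2.5,                 # component evaluation thresholds
    'min_spatial_corr': 0.85,
    'cnn_threshold': 0.9,
}
# roi_sf, roi_tf, roi_tr, bg_sf, bg_tf = find_rois_cnmf('/path/to/video.tif',
#                                                       example_params,
#                                                       mc_borders=None,
#                                                       use_multiprocessing=False)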