Example #1
def _export_movie_to_memmap(tiff_files,
                            num_frames,
                            num_rows,
                            num_cols,
                            overwrite=False,
                            dest_dir=None):

    if dest_dir is None:
        # use the first movie's directory as the destination directory
        dest_dir, isxd_movie_name = os.path.split(tiff_files[0])

    # use the first movie's name as the output memmap file
    mmap_name = _get_memmap_name(tiff_files[0], num_frames, num_rows, num_cols)

    mmap_file = os.path.join(dest_dir, mmap_name)
    if os.path.exists(mmap_file):
        if overwrite:
            os.remove(mmap_file)
        else:
            return mmap_file

    # write a tiff file, use the name of the first movie as the tiff file base name
    root_dir, fname = os.path.split(tiff_files[0])
    base_name, ext = os.path.splitext(fname)

    # fix for edge case where default chunk size is larger than the entire file size
    n_chunks = min(num_rows * num_cols, 100)

    save_memmap(tiff_files, base_name=base_name, order='C', n_chunks=n_chunks)

    return mmap_file
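
# A minimal sketch (hypothetical caller, not part of the original helper) of reading the
# exported memmap back and reshaping it to a frames-first array; it assumes the generated
# file name follows CaImAn's usual memmap naming convention and that the inputs below are
# already defined by the caller.
import caiman as cm
import numpy as np

mmap_file = _export_movie_to_memmap(tiff_files, num_frames, num_rows, num_cols)
Yr, dims, T = cm.load_memmap(mmap_file)                  # Yr has shape (pixels, T)
frames = np.reshape(Yr.T, [T] + list(dims), order='F')   # (T, num_rows, num_cols)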
Example #2
def caiman_motion_correct(fnames, opts):
    '''Run the motion correction step of the CaImAn pipeline and save the results.

    Parameters
    ----------
    fnames : string
        name of tiff movie
    opts : caiman params object with the options for motion correction'''

    #%% start a cluster for parallel processing (if a cluster already exists it will be closed and a new session will be opened)
    if 'dview' in locals():
        cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # first we create a motion correction object with the parameters specified
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    mc.motion_correct(save_movie=True)
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file,
                               base_name='memmap_',
                               order='C',
                               border_to_0=border_to_0,
                               dview=dview)  # exclude borders
    cm.stop_server(dview=dview)
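
# A follow-on sketch (not from the original; `fname_new` here stands for the C-order memmap
# path produced by cm.save_memmap above, which the function would need to return or otherwise
# expose) showing how such a file is typically consumed in the later examples:
Yr, dims, T = cm.load_memmap(fname_new)
images = np.reshape(Yr.T, [T] + list(dims), order='F')   # frames in (T, x, y) order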
Example #3
def demo(parallel=False):

    # roughly the number of cores on your machine minus 2
    n_processes = np.maximum(psutil.cpu_count() - 2, 1)
    p = 2  # order of the AR model (in general 1 or 2)
    stop_server()
    if parallel:
        start_server()
        c = Client()

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap(['example_movies/demoMovie.tif'],
                               base_name='Yr')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=30,
                    gSig=[4, 4],
                    merge_thresh=.8,
                    p=p,
                    dview=c[:] if parallel else None,
                    Ain=None,
                    method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        stop_server()

    # verifying the spatial components
    npt.assert_allclose(cnm.A.sum(), 32282000, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.C.sum(), 640.5, 1e-2)
Example #4
def demo(parallel=False):

    p = 2  # order of the AR model (in general 1 or 2)
    if parallel:
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend='local', n_processes=None, single_thread=False)
    else:
        n_processes, dview = 2, None

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap([os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')],
                                base_name='Yr',
                                order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes, method_init='greedy_roi', k=30, gSig=[4, 4], merge_thresh=.8,
                    p=p, dview=dview, Ain=None, method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        cm.cluster.stop_server(dview=dview)

    # verifying the spatial components
    npt.assert_allclose(cnm.A.sum(), 281.1, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.C.sum(), 66271668, 1e-2)
    try:
        dview.terminate()
    except:
        pass
Example #5
 def make_mmap(self, files):
     t = tic()
     print('Memory mapping current file...', end=' ')
     self.memmap = cm.save_memmap(files,
                                  base_name=f'MAP{self.fnumber}a',
                                  order='C',
                                  slices=[
                                      slice(0, -1,
                                            self.channels * self.planes),
                                      slice(0, 512),
                                      slice(self.x_start, self.x_end)
                                  ])
     print(f'done. Took {toc(t):.4f}s')
Example #6
def memmap_movie(fnames, load=True):
    '''Memory-map an already motion-corrected movie using caiman functions.
       If load is True, return the memmap reshaped and ready to fit with caiman (using cnm.fit(images)).'''
    if 'dview' in locals():
        cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=12, single_thread=False)

    fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C',
                               border_to_0=0, dview=dview)  # exclude borders
    cm.stop_server(dview=dview)
    if load:
        Yr, dims, T = cm.load_memmap(fname_new)
        images = np.reshape(Yr.T, [T] + list(dims), order='F')
        return images
    # otherwise just hand back the path to the new memmap file
    return fname_new
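
# A possible usage sketch (the file name below is an assumption, not from the original),
# matching the docstring: the reshaped memmap can be fed straight into a CNMF fit.
images = memmap_movie(['/path/to/motion_corrected_movie.tif'], load=True)
# cnm = cnm.fit(images)   # with a previously configured cnmf.CNMF object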
Example #7
def mem_mapping(mc, border_to_0, dview, is_3d=False):
    """memory maps the file in order 'C' and then loads the new memory mapped
    file. The saved files from motion correction are memory mapped files stored
    in 'F' order. Their paths are stored in mc.mmap_file."""
    # Memory map the file in order 'C', excluding borders
    fname_new = cm.save_memmap(mc.mmap_file,
                               base_name='memmap_',
                               order='C',
                               border_to_0=border_to_0,
                               is_3D=is_3d,
                               dview=dview)

    # Load the file with frames in python format (T x X x Y)
    Yr, dims, T = load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    return images
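
# A small illustration (a sketch, not from the original; `fname_new` is the C-order memmap
# path written by cm.save_memmap above) of why 'C' order is chosen: each pixel's full time
# course is stored contiguously, so the per-pixel reads done during source extraction are cheap.
Yr, dims, T = load_memmap(fname_new)   # Yr has shape (num_pixels, T)
pixel_trace = Yr[0, :]                 # one pixel's trace: a contiguous read in 'C' order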
Example #8
def merge_denoised_tiff_files(movie, loaddir, savedir):
    #%%
    cpu_num = 2
    #cpu_num_spikepursuit = 1

    filenames = os.listdir(loaddir)
    counter = 0
    filenames_final = list()
    residualnames = list()
    while 'denoised_{}.tif'.format(counter) in filenames:
        m_new_denoised = cm.load(
            os.path.join(loaddir,
                         'denoised_{}.tif'.format(counter))).transpose(
                             2, 0, 1)
        i_new_sn = imio.imread(
            os.path.join(loaddir, 'Sn_image_{}.tif'.format(counter)))[:, :, 0]
        m_new_trend = cm.load(
            os.path.join(loaddir,
                         'trend_{}.tif'.format(counter))).transpose(2, 0, 1)
        movief = m_new_denoised * i_new_sn + m_new_trend
        movief.save(os.path.join(loaddir, 'movie{}.tif'.format(counter)))
        filenames_final.append(
            os.path.join(loaddir, 'movie{}.tif'.format(counter)))
        residualnames.append(
            os.path.join(loaddir, 'PMD_residual_{}.tif'.format(counter)))
        counter += 1
        print(counter)

    #%%
    residuals_movie = cm.load_movie_chain(residualnames)
    residuals_movie.save(os.path.join(savedir, 'PMD_residuals.tif'))
    #movie_big = cm.load_movie_chain(filenames_final)
    # %% Memory Mapping
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=cpu_num,
                                                     single_thread=False)
    fname_new = cm.save_memmap(filenames_final,
                               base_name=movie['movie_name'],
                               dview=dview,
                               n_chunks=10,
                               order='C')
    dview.terminate()
    fname = pathlib.Path(fname_new).name
    shutil.move(fname_new, os.path.join(savedir, fname))
    print('done')
Example #9
 def make_mmap(self, files):
     """Make memory mapped files for each plane in a set of tiffs."""
     t = tic()
     memmap = []
     for plane in range(self.planes):
         print(f'Memory mapping current file, plane {plane}...')
         plane_slice = plane * self.channels
         memmap.append(
             cm.save_memmap(files,
                            base_name=f'MAP{self.fnumber}_plane{plane}_a',
                            order='C',
                            slices=[
                                slice(plane_slice, -1,
                                      self.channels * self.planes),
                                slice(0, 512),
                                slice(self.x_start, self.x_end)
                            ]))
     print(f'Memory mapping done. Took {toc(t):.4f}s')
     return memmap
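
# A hypothetical caller (the `recorder` object and `tiff_files` list are assumptions, not
# from the original) that loads the first plane's memmap produced by make_mmap above:
mmaps = recorder.make_mmap(tiff_files)                    # one memmap path per plane
Yr, dims, T = cm.load_memmap(mmaps[0])                    # load plane 0
images = np.reshape(Yr.T, [T] + list(dims), order='F')    # frames as (T, x, y)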
Example #10
def demo(parallel=False):

    p = 2  # order of the AR model (in general 1 or 2)
    if parallel:
        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)
    else:
        n_processes, dview = 2, None

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap(
        [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')],
        base_name='Yr',
        order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=30,
                    gSig=[4, 4],
                    merge_thresh=.8,
                    p=p,
                    dview=dview,
                    Ain=None,
                    method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        cm.cluster.stop_server(dview=dview)

    # verifying the spatial components
    npt.assert_allclose(cnm.estimates.A.sum(), 281.1, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.estimates.C.sum(), 66271668, 1e-2)
    try:
        dview.terminate()
    except:
        pass
Example #11
    def motion_correct(self):
        """
        Do motion correction.

        """
        # Set up motion correction object and load parameters.
        mc = MotionCorrect(self.fnames,
                           dview=self.dview,
                           **self.opts.get_group('motion'))

        # Do motion correction.
        mc.motion_correct(save_movie=True)

        # Plot motion correction statistics.
        fname_mc = mc.fname_tot_els if self.opts.motion['pw_rigid'] \
            else mc.fname_tot_rig
        if self.opts.motion['pw_rigid']:
            self.bord_px = np.ceil(
                np.maximum(np.max(np.abs(mc.x_shifts_els)),
                           np.max(np.abs(mc.y_shifts_els)))).astype(int)
        else:
            self.bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
            plt.subplot(1, 2, 1)
            plt.imshow(mc.total_template_rig)  # % plot template
            plt.subplot(1, 2, 2)
            plt.plot(mc.shifts_rig)  # % plot rigid shifts
            plt.legend(['x shifts', 'y shifts'])
            plt.xlabel('frames')
            plt.ylabel('pixels')

        # Save memory map.
        self.bord_px = 0 if self.opts.motion['border_nan'] == 'copy' \
            else self.bord_px
        self.fname_new = cm.save_memmap(fname_mc,
                                        base_name='full',
                                        order='C',
                                        border_to_0=self.bord_px)
Example #12
def calculate_temporal_components(video_paths, roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints):
    for i in range(len(video_paths)):
        video_path = video_paths[i]

        video = tifffile.imread(video_path)
        if len(video.shape) == 3:
            # add a z dimension
            video = video[:, np.newaxis, :, :]

        if i == 0:
            final_video = video.copy()
        else:
            final_video = np.concatenate([final_video, video], axis=0)

    roi_temporal_footprints = [ None for i in range(final_video.shape[1]) ]
    roi_temporal_residuals  = [ None for i in range(final_video.shape[1]) ]
    bg_temporal_footprints  = [ None for i in range(final_video.shape[1]) ]

    for z in range(final_video.shape[1]):
        final_video_path = "video_concatenated_z_{}.tif".format(z)
        tifffile.imsave(final_video_path, final_video[:, z, :, :])

        fname_new = cm.save_memmap([final_video_path], base_name='memmap_z_{}'.format(z), order='C') # exclude borders

        # # now load the file
        Yr, dims, T = cm.load_memmap(fname_new)

        images = final_video[:, z, :, :].transpose((1, 2, 0)).reshape((final_video.shape[2]*final_video.shape[3], final_video.shape[0]))

        Cin = np.zeros((roi_spatial_footprints[z].shape[1], final_video.shape[0]))
        fin = np.zeros((bg_spatial_footprints[z].shape[1], final_video.shape[0]))

        Yr, sn, g, psx = preprocess_data(Yr, dview=None)

        (roi_temporal_footprints[z], _, _, bg_temporal_footprints[z],
         _, _, _, _, _, roi_temporal_residuals[z], _) = update_temporal_components(
            images, roi_spatial_footprints[z], bg_spatial_footprints[z], Cin, fin,
            bl=None, c1=None, g=g, sn=sn, nb=2, ITER=2, block_size=5000,
            num_blocks_per_run=20, debug=False, dview=None, p=0, method='cvx')

    return roi_temporal_footprints, roi_temporal_residuals, bg_temporal_footprints
Example #13
def demo(parallel=False):

    p = 2  # order of the AR model (in general 1 or 2)
    if parallel:
        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)
    else:
        n_processes, dview = 2, None

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap([
        os.path.abspath(cm.__path__[0][:-7]) + '/example_movies/demoMovie.tif'
    ],
                               base_name='Yr')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=30,
                    gSig=[4, 4],
                    merge_thresh=.8,
                    p=p,
                    dview=dview,
                    Ain=None,
                    method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        cm.cluster.stop_server()

    # verifying the spatial components
    npt.assert_allclose(cnm.A.sum(), 31913160, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.C.sum(), 640.5, 1e-2)
Example #14
def run(batch_dir: str, UUID: str):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format=
        "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s"
    )

    start_time = time()

    output = {'status': 0, 'output_info': ''}
    n_processes = os.environ['_MESMERIZE_N_THREADS']
    n_processes = int(n_processes)
    file_path = os.path.join(batch_dir, UUID)

    filename = [file_path + '_input.tiff']
    input_params = pickle.load(open(file_path + '.params', 'rb'))

    # If Ain is specified
    if input_params['do_cnmfe']:
        Ain = None
        item_uuid = input_params['cnmfe_kwargs'].pop('Ain')

        if item_uuid:
            print('>> Ain specified, looking for cnm-A file <<')
            parent_batch_dir = os.environ['CURR_BATCH_DIR']
            item_out_file = os.path.join(parent_batch_dir, f'{item_uuid}.out')
            t0 = time()
            timeout = 60
            while not os.path.isfile(item_out_file):
                print('>>> cnm-A not found, waiting for 15 seconds <<<')
                sleep(15)
                if time() - t0 > timeout:
                    output.update({
                        'status': 0,
                        'output_info': 'Timeout exceeded while waiting for Ain input file'
                    })
                    raise TimeoutError(
                        'Timeout exceeded while waiting for Ain input file')

            if os.path.isfile(item_out_file):
                if json.load(open(item_out_file, 'r'))['status']:
                    Ain_path = os.path.join(parent_batch_dir,
                                            item_uuid + '_results.hdf5')
                    Ain = load_dict_from_hdf5(Ain_path)['estimates']['A']
                    print('>>> Found Ain file <<<')
                else:
                    raise FileNotFoundError(
                        '>>> Could not find specified Ain file <<<')

    print('*********** Creating Process Pool ***********')

    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=n_processes,
                                                     single_thread=False
                                                     #ignore_preexisting=True
                                                     )

    try:
        print('Creating memmap')

        # memmap_path = cm.save_memmap_each(
        #     filename,
        #     base_name='memmap-' + UUID,
        #     order='C',
        #     border_to_0=input_params['border_pix'],
        #     dview=dview)
        # memmap_path = cm.save_memmap_join(memmap_path, base_name='memmap-' + UUID, dview=dview)
        # # load memory mappable file
        # Yr, dims, T = cm.load_memmap(memmap_path)
        # Y = Yr.T.reshape((T,) + dims, order='F')

        memmap_path = cm.save_memmap(
            filename,
            base_name='memmap-',
            order='C',
            dview=dview,
            border_to_0=input_params['border_pix'],
        )

        Yr, dims, T = cm.load_memmap(memmap_path)
        Y = np.reshape(Yr.T, [T] + list(dims), order='F')

        if input_params['do_cnmfe']:
            gSig = input_params['cnmfe_kwargs']['gSig'][0]
        else:
            gSig = input_params['corr_pnr_kwargs']['gSig']

        cn_filter, pnr = cm.summary_images.correlation_pnr(Y,
                                                           swap_dim=False,
                                                           gSig=gSig)

        if not input_params['do_cnmfe'] and input_params['do_corr_pnr']:
            pickle.dump(cn_filter,
                        open(UUID + '_cn_filter.pikl', 'wb'),
                        protocol=4)
            pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)

            output_file_list = \
                [
                    UUID + '_pnr.pikl',
                    UUID + '_cn_filter.pikl',
                ]

            output.update({
                'output': UUID,
                'status': 1,
                'output_info': 'inspect correlation & pnr',
                'output_files': output_file_list
            })

            dview.terminate()

            for mf in glob(batch_dir + '/memmap-*'):
                os.remove(mf)

            end_time = time()
            processing_time = (end_time - start_time) / 60
            output.update({'processing_time': processing_time})

            json.dump(output, open(file_path + '.out', 'w'))

            return

        cnm = cnmf.CNMF(
            n_processes=n_processes,
            method_init='corr_pnr',
            dview=dview,
            Ain=Ain,
            only_init_patch=True,  # just leave it as is
            normalize_init=False,
            center_psf=True,
            **input_params['cnmfe_kwargs'],
        )

        cnm.fit(Y)

        #  DISCARD LOW QUALITY COMPONENTS
        cnm.params.set('quality', {
            'use_cnn': False,
            **input_params['eval_kwargs']
        })

        cnm.estimates.evaluate_components(Y, cnm.params, dview=dview)

        out_filename = f'{UUID}_results.hdf5'
        cnm.save(out_filename)

        pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)
        pickle.dump(cn_filter,
                    open(UUID + '_cn_filter.pikl', 'wb'),
                    protocol=4)

        output.update({
            'output': filename[0][:-5],
            'status': 1,
            'output_files': [
                out_filename,
                UUID + '_pnr.pikl',
                UUID + '_cn_filter.pikl',
            ]
        })

    except Exception as e:
        output.update({
            'status': 0,
            'Y.shape': Y.shape,
            'output_info': traceback.format_exc()
        })

    dview.terminate()

    for mf in glob(batch_dir + '/memmap-*'):
        try:
            os.remove(mf)
        except:  # Windows doesn't like removing the memmaps
            pass

    end_time = time()
    processing_time = (end_time - start_time) / 60
    output.update({'processing_time': processing_time})

    json.dump(output, open(file_path + '.out', 'w'))
Example #15
    img = m.local_correlations_movie(file_name=fnames[0],
                                     dview=dview,
                                     swap_dim=False,
                                     window=100,
                                     order_mean=-2).astype(np.float32)
#    img[np.isnan(img)]=0
#    img[img>1]=1
#    img=img*(img>0)
#%%
# location of dataset (can actually be a list of files to be concatenated)
add_to_movie = 0
# if minimum is negative subtract to make the data non-negative
base_name = 'Yr'
fname_new = cm.save_memmap(fnames,
                           dview=dview,
                           base_name=base_name,
                           add_to_movie=add_to_movie,
                           order='C')
#%% LOAD MEMORY MAPPABLE FILE
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
#%% play movie, press q to quit
play_movie = True
if play_movie:
    cm.movie(images).play(fr=50, magnification=2, gain=2)

#%% correlation image. From here infer neuron size and density
Cn = cm.movie(images).local_correlations(swap_dim=False)
plt.cla()
plt.imshow(Cn, cmap='gray')
Example #16
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% start a cluster

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

    #%% save files to be processed

    # This datafile is distributed with Caiman
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]
    # location of dataset (can actually be a list of files to be concatenated)
    add_to_movie = -np.min(cm.load(fnames[0],
                                   subindices=range(200))).astype(float)
    # determine minimum value on a small chunk of data
    add_to_movie = np.maximum(add_to_movie, 0)
    # if minimum is negative subtract to make the data non-negative
    base_name = 'Yr'
    fname_new = cm.save_memmap(fnames,
                               dview=dview,
                               base_name=base_name,
                               order='C',
                               add_to_movie=add_to_movie)

    #%% LOAD MEMORY MAPPABLE FILE
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    #%% play movie, press q to quit
    play_movie = False
    if play_movie:
        cm.movie(images[1400:]).play(fr=50, magnification=4, gain=3.)

#%% correlation image. From here infer neuron size and density
    Cn = cm.movie(images).local_correlations(swap_dim=False)
    plt.imshow(Cn, cmap='gray')
    plt.title('Correlation Image')

    #%% set up some parameters

    is_patches = True  # flag for processing in patches or not

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 4  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = 30  # number of neurons expected (in the whole FOV)

    gSig = [6, 6]  # expected half size of neurons
    merge_thresh = 0.80  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    #%% Now RUN CNMF
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    gnb=gnb,
                    rf=rf,
                    stride=stride,
                    rolling_sum=False)
    cnm = cnm.fit(images)

    #%% plot contour plots of components

    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
    plt.title('Contour plots of components')

    #%%
    A_in, C_in, b_in, f_in = cnm.A[:, :], cnm.C[:], cnm.b, cnm.f
    cnm2 = cnmf.CNMF(n_processes=1,
                     k=A_in.shape[-1],
                     gSig=gSig,
                     p=p,
                     dview=dview,
                     merge_thresh=merge_thresh,
                     Ain=A_in,
                     Cin=C_in,
                     b_in=b_in,
                     f_in=f_in,
                     rf=None,
                     stride=None,
                     gnb=gnb,
                     method_deconvolution='oasis',
                     check_nan=True)

    cnm2 = cnm2.fit(images)
    #%% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)
    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient
    min_SNR = 2.5  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.90  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.95  # if cnn classifier predicts below this value, reject

    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                         cnm.YrA, fr, decay_time, gSig, dims,
                                         dview=dview, min_SNR=min_SNR,
                                         r_values_min=rval_thr, use_cnn=use_cnn,
                                         thresh_cnn_min=min_cnn_thr)
    #%% visualize selected and rejected components
    plt.figure()
    plt.subplot(1, 2, 1)
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components],
                                         Cn,
                                         thr=0.9)
    plt.title('Selected components')
    plt.subplot(1, 2, 2)
    plt.title('Discarded components')
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components_bad],
                                         Cn,
                                         thr=0.9)

    #%%
    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm2.A.tocsc()[:,
                                                              idx_components],
                                               Cn,
                                               thr=0.9)
    plt.title('Contour plots of components')
    #%% visualize selected components
    cm.utils.visualization.view_patches_bar(Yr,
                                            cnm2.A.tocsc()[:, idx_components],
                                            cnm2.C[idx_components, :],
                                            cnm2.b,
                                            cnm2.f,
                                            dims[0],
                                            dims[1],
                                            YrA=cnm2.YrA[idx_components, :],
                                            img=Cn)
    #%% STOP CLUSTER and clean up log files
    cm.stop_server()

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Example #17
#                ring_size_factor=1.5, center_psf=True)

#%%
#cnm.options['init_params']['gSiz'] = (10, 10)
#cnm.options['init_params']['gSig'] = (3, 3)
#cnm.options['init_params']['min_corr'] = .85
#cnm.options['init_params']['min_pnr'] = 20
# cnm.options['init_params']['normalize_init']=False

#%%
memmap = True  # must be True for patches
if memmap:
    if '.mmap' in fname:
        fname_new = fname
    else:
        fname_new = cm.save_memmap([fname], base_name='Yr')
    Yr, dims, T = cm.load_memmap(fname_new)
    cnm.fit(Yr.T.reshape((T, ) + dims, order='F'))
else:
    cnm.fit(Y)
# %%
#A_tot, C_tot, b_tot, f_tot, YrA_tot, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
# %%
#crd = cm.utils.visualization.plot_contours(A_tot, cn_filter, thr=.95, vmax=0.95)
# %%
#plt.imshow(A_tot.sum(-1).reshape(dims, order='F'))
#
#
# %% DISCARD LOW QUALITY COMPONENT
#final_frate = 10
# r_values_min = 0.1  # threshold on space consistency
Example #18
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% First setup some parameters

    # dataset dependent parameters
    display_images = False  # Set this to true to show movies and plots
    fname = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    fr = 30  # imaging rate in frames per second
    decay_time = 0.4  # length of a typical transient in seconds

    # motion correction parameters
    niter_rig = 1  # number of iterations for rigid motion correction
    max_shifts = (6, 6)  # maximum allowed rigid shift
    # for parallelization split the movies into num_splits chunks across time
    splits_rig = 56
    # start a new patch for pw-rigid motion correction every x pixels
    strides = (48, 48)
    # overlap between patches (size of patch = strides + overlaps)
    overlaps = (24, 24)
    # for parallelization split the movies into num_splits chunks across time
    splits_els = 56
    upsample_factor_grid = 4  # upsample factor to avoid smearing when merging patches
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    # parameters for source extraction and deconvolution
    p = 1  # order of the autoregressive system
    gnb = 2  # number of global background components
    merge_thresh = 0.8  # merging threshold, max correlation allowed
    # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    rf = 15
    stride_cnmf = 6  # amount of overlap between the patches in pixels
    K = 4  # number of components per patch
    gSig = [4, 4]  # expected half size of neurons
    # initialization method (use 'sparse_nmf' if analyzing dendritic data)
    init_method = 'greedy_roi'
    is_dendrites = False  # flag for analyzing dendritic data
    # sparsity penalty for dendritic data analysis through sparse NMF
    alpha_snmf = None

    # parameters for component evaluation
    min_SNR = 2.5  # signal to noise ratio for accepting a component
    rval_thr = 0.8  # space correlation threshold for accepting a component
    cnn_thr = 0.8  # threshold for CNN based classifier

    #%% download the dataset if it's not present in your folder
    if fname[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
        fname = [download_demo(fname[0])]

#%% play the movie
# playing the movie using opencv. It requires loading the movie in memory. To
# close the video press q

    m_orig = cm.load_movie_chain(fname[:1])
    downsample_ratio = 0.2
    offset_mov = -np.min(m_orig[:100])
    moviehandle = m_orig.resize(1, 1, downsample_ratio)
    if display_images:
        moviehandle.play(gain=10, offset=offset_mov, fr=30, magnification=2)

#%% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    #%%% MOTION CORRECTION
    # first we create a motion correction object with the parameters specified
    min_mov = cm.load(fname[0], subindices=range(200)).min()
    # this will be subtracted from the movie to make it non-negative

    mc = MotionCorrect(fname[0],
                       min_mov,
                       dview=dview,
                       max_shifts=max_shifts,
                       niter_rig=niter_rig,
                       splits_rig=splits_rig,
                       strides=strides,
                       overlaps=overlaps,
                       splits_els=splits_els,
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=True,
                       nonneg_movie=True)
    # note that the file is not loaded in memory

    #%% Run piecewise-rigid motion correction using NoRMCorre
    mc.motion_correct_pwrigid(save_movie=True)
    m_els = cm.load(mc.fname_tot_els)
    bord_px_els = np.ceil(
        np.maximum(np.max(np.abs(mc.x_shifts_els)),
                   np.max(np.abs(mc.y_shifts_els)))).astype(int)
    # maximum shift to be used for trimming against NaNs
    #%% compare with original movie
    moviehandle = cm.concatenate([
        m_orig.resize(1, 1, downsample_ratio) + offset_mov,
        m_els.resize(1, 1, downsample_ratio)
    ],
                                 axis=2)
    display_images = False
    if display_images:
        moviehandle.play(fr=60, q_max=99.5, magnification=2,
                         offset=0)  # press q to exit

#%% MEMORY MAPPING
# memory map the file in order 'C'
    fnames = mc.fname_tot_els  # name of the pw-rigidly corrected file.
    border_to_0 = bord_px_els  # number of pixels to exclude
    fname_new = cm.save_memmap(fnames,
                               base_name='memmap_',
                               order='C',
                               border_to_0=bord_px_els)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

    #%% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    #%% RUN CNMF ON PATCHES

    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0)
    t1 = time.time()

    cnm = cnmf.CNMF(n_processes=1,
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=0,
                    dview=dview,
                    rf=rf,
                    stride=stride_cnmf,
                    memory_fact=1,
                    method_init=init_method,
                    alpha_snmf=alpha_snmf,
                    only_init_patch=False,
                    gnb=gnb,
                    border_pix=bord_px_els)
    cnm = cnm.fit(images)

    #%% plot contours of found components
    Cn = cm.local_correlations(images.transpose(1, 2, 0))
    Cn[np.isnan(Cn)] = 0
    plt.figure()
    crd = plot_contours(cnm.A, Cn, thr=0.9)
    plt.title('Contour plots of found components')

    #%% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier

    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                         cnm.YrA, fr, decay_time, gSig, dims,
                                         dview=dview, min_SNR=min_SNR,
                                         r_values_min=rval_thr, use_cnn=False,
                                         thresh_cnn_min=cnn_thr)

    #%% PLOT COMPONENTS

    if display_images:
        plt.figure()
        plt.subplot(121)
        crd_good = cm.utils.visualization.plot_contours(cnm.A[:,
                                                              idx_components],
                                                        Cn,
                                                        thr=.8,
                                                        vmax=0.75)
        plt.title('Contour plots of accepted components')
        plt.subplot(122)
        crd_bad = cm.utils.visualization.plot_contours(
            cnm.A[:, idx_components_bad], Cn, thr=.8, vmax=0.75)
        plt.title('Contour plots of rejected components')

#%% VIEW TRACES (accepted and rejected)

    if display_images:
        view_patches_bar(Yr,
                         cnm.A.tocsc()[:, idx_components],
                         cnm.C[idx_components],
                         cnm.b,
                         cnm.f,
                         dims[0],
                         dims[1],
                         YrA=cnm.YrA[idx_components],
                         img=Cn)

        view_patches_bar(Yr,
                         cnm.A.tocsc()[:, idx_components_bad],
                         cnm.C[idx_components_bad],
                         cnm.b,
                         cnm.f,
                         dims[0],
                         dims[1],
                         YrA=cnm.YrA[idx_components_bad],
                         img=Cn)

#%% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    A_in, C_in, b_in, f_in = (cnm.A[:, idx_components],
                              cnm.C[idx_components], cnm.b, cnm.f)
    cnm2 = cnmf.CNMF(n_processes=1,
                     k=A_in.shape[-1],
                     gSig=gSig,
                     p=p,
                     dview=dview,
                     merge_thresh=merge_thresh,
                     Ain=A_in,
                     Cin=C_in,
                     b_in=b_in,
                     f_in=f_in,
                     rf=None,
                     stride=None,
                     gnb=gnb,
                     method_deconvolution='oasis',
                     check_nan=True)

    cnm2 = cnm2.fit(images)

    #%% Extract DF/F values

    F_dff = detrend_df_f(cnm2.A,
                         cnm2.b,
                         cnm2.C,
                         cnm2.f,
                         YrA=cnm2.YrA,
                         quantileMin=8,
                         frames_window=250)

    #%% Show final traces
    cnm2.view_patches(Yr, dims=dims, img=Cn)

    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

#%% reconstruct denoised movie
    denoised = cm.movie(cnm2.A.dot(cnm2.C) + cnm2.b.dot(cnm2.f)).reshape(
        dims + (-1, ), order='F').transpose([2, 0, 1])

    #%% play along side original data
    moviehandle = cm.concatenate([
        m_els.resize(1, 1, downsample_ratio),
        denoised.resize(1, 1, downsample_ratio)
    ],
                                 axis=2)
    if display_images:
        moviehandle.play(fr=60, gain=15, magnification=2,
                         offset=0)  # press q to exit
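
    #%% (sketch, not part of the original demo) optionally write the reconstructed movie to
    #   disk, reusing the movie.save() call shown in Example #8; the output name is hypothetical
    denoised.save('denoised_movie.tif')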
Example #19
               np.max(np.abs(mc.y_shifts_els)))).astype(int)
# maximum shift to be used for trimming against NaNs
#%% compare with original movie
cm.concatenate([
    m_orig.resize(1, 1, downsample_ratio) + offset_mov,
    m_els.resize(1, 1, downsample_ratio)
],
               axis=2).play(fr=60, gain=15, magnification=2,
                            offset=0)  # press q to exit

#%% MEMORY MAPPING
# memory map the file in order 'C'
fnames = [mc.fname_tot_els]  # name of the pw-rigidly corrected file.
border_to_0 = bord_px_els  # number of pixels to exclude
fname_new = cm.save_memmap(fnames,
                           base_name='memmap_',
                           order='C',
                           border_to_0=bord_px_els)  # exclude borders

# now load the file
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
#load frames in python format (T x X x Y)

#%% restart cluster to clean up memory
dview.terminate()
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)

#%% RUN CNMF ON PATCHES
Example #20
from memory_profiler import memory_usage
from multiprocessing import Pool, cpu_count
import numpy as np
from time import time, sleep
import matplotlib.pyplot as plt
from scipy.io import loadmat
import os
import caiman as cm
# set MKL_NUM_THREADS and OPENBLAS_NUM_THREADS to 1 outside via export!
# takes about 170 mins for all runs

base_folder = '/mnt/ceph/neuro/DataForPublications/DATA_PAPER_ELIFE/WEBSITE/'
fname = os.path.join(base_folder,'test_sim.mat')
dims = (253, 316)
Yr = loadmat(fname)['Y']
Y = Yr.T.reshape((-1,) + dims, order='F')
cm.save_memmap([Y], base_name='Yr', order='C')


def main(n_processes=None, patches=True, rf=64):

    t = -time()

    Yr, dims, T = cm.load_memmap(os.path.abspath('./Yr_d1_253_d2_316_d3_1_order_C_frames_2000_.mmap'))
    Y = Yr.T.reshape((T,) + dims, order='F')

    # c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=n_processes)
    # the line above doesn't work because memory_profiler creates its own multiprocessing objects
    if n_processes is None:
        n_processes = cpu_count()
    dview = Pool(n_processes) if patches else None
    print('{0} processes'.format(n_processes))
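
    # (sketch, not in the original snippet) once this function is complete it could be
    # profiled from module scope with memory_profiler's standard API, e.g.:
    #     peak = memory_usage((main, (), {'n_processes': 4, 'patches': True}),
    #                         interval=1, max_usage=True)
    #     print('peak memory (MiB):', peak)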
Example #21
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %% start the cluster
    try:
        cm.stop_server()  # stop it if it was running
    except Exception:
        pass

    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local',
        n_processes=24,  # number of processes to use; if you go out of memory try to reduce this
        single_thread=False)

    # %% First setup some parameters for motion correction
    # dataset dependent parameters
    fnames = ['data_endoscope.tif']  # filename to be processed
    fnames = [download_demo(fnames[0])]  # download file if not already present
    filename_reorder = fnames
    fr = 10  # movie frame rate
    decay_time = 0.4  # length of a typical transient in seconds

    # motion correction parameters
    motion_correct = True  # flag for motion correction
    pw_rigid = False  # flag for pw-rigid motion correction

    gSig_filt = (3, 3)  # size of filter, in general gSig (see below),
    #                      change this one if algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (size of patch = strides + overlaps)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3
    border_nan = 'copy'

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = params.CNMFParams(params_dict=mc_dict)

    # %% MOTION CORRECTION
    #  The pw_rigid flag set above determines whether to use rigid or pw-rigid
    #  motion correction
    if motion_correct:
        # do motion correction rigid
        mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
        mc.motion_correct(save_movie=True)
        fname_mc = mc.fname_tot_els if pw_rigid else mc.fname_tot_rig
        if pw_rigid:
            bord_px = np.ceil(
                np.maximum(np.max(np.abs(mc.x_shifts_els)),
                           np.max(np.abs(mc.y_shifts_els)))).astype(int)
        else:
            bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
            plt.subplot(1, 2, 1)
            plt.imshow(mc.total_template_rig)  # % plot template
            plt.subplot(1, 2, 2)
            plt.plot(mc.shifts_rig)  # % plot rigid shifts
            plt.legend(['x shifts', 'y shifts'])
            plt.xlabel('frames')
            plt.ylabel('pixels')

        bord_px = 0 if border_nan == 'copy' else bord_px
        fname_new = cm.save_memmap(fname_mc,
                                   base_name='memmap_',
                                   order='C',
                                   border_to_0=bord_px)
    else:  # if no motion correction just memory map the file
        fname_new = cm.save_memmap(filename_reorder,
                                   base_name='memmap_',
                                   order='C',
                                   border_to_0=0,
                                   dview=dview)

    # load memory mappable file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = Yr.T.reshape((T, ) + dims, order='F')

    # %% Parameters for source extraction and deconvolution (CNMF-E algorithm)

    p = 1  # order of the autoregressive system
    K = None  # upper bound on number of components per patch, in general None for 1p data
    gSig = (3, 3)  # gaussian width of a 2D gaussian kernel, which approximates a neuron
    gSiz = (13, 13)  # average diameter of a neuron, in general 4*gSig+1
    Ain = None  # possibility to seed with predetermined binary masks
    merge_thr = .7  # merging threshold, max correlation allowed
    rf = 40  # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
    stride_cnmf = 20  # amount of overlap between the patches in pixels
    #                     (keep it at least as large as gSiz, i.e. 4 times the neuron size gSig)
    tsub = 2  # downsampling factor in time for initialization,
    #                     increase if you have memory problems
    ssub = 1  # downsampling factor in space for initialization,
    #                     increase if you have memory problems
    #                     you can pass them here as boolean vectors
    low_rank_background = None  # None leaves background of each patch intact,
    #                     True performs global low-rank approximation if gnb>0
    gnb = 0  # number of background components (rank) if positive,
    #                     else exact ring model with following settings
    #                         gnb= 0: Return background as b and W
    #                         gnb=-1: Return full rank background B
    #                         gnb<-1: Don't return background
    nb_patch = 0  # number of background components (rank) per patch if gnb>0,
    #                     else it is set automatically
    min_corr = .8  # min peak value from correlation image
    min_pnr = 10  # min peak to noise ratio from PNR image
    ssub_B = 2  # additional downsampling factor in space for background
    ring_size_factor = 1.4  # radius of ring is gSiz*ring_size_factor

    opts.change_params(
        params_dict={
            'dims': dims,
            'method_init': 'corr_pnr',  # use this for 1 photon
            'K': K,
            'gSig': gSig,
            'gSiz': gSiz,
            'merge_thr': merge_thr,
            'p': p,
            'tsub': tsub,
            'ssub': ssub,
            'rf': rf,
            'stride': stride_cnmf,
            'only_init': True,  # set it to True to run CNMF-E
            'nb': gnb,
            'nb_patch': nb_patch,
            'method_deconvolution': 'oasis',  # could use 'cvxpy' alternatively
            'low_rank_background': low_rank_background,
            'update_background_components':
            True,  # sometimes setting this to False improves the results
            'min_corr': min_corr,
            'min_pnr': min_pnr,
            'normalize_init': False,  # just leave as is
            'center_psf': True,  # leave as is for 1 photon
            'ssub_B': ssub_B,
            'ring_size_factor': ring_size_factor,
            'del_duplicates':
            True,  # whether to remove duplicates from initialization
            'border_pix': bord_px
        })  # border_pix: number of pixels to not consider at the borders

    # %% compute some summary images (correlation and peak to noise)
    # change swap_dim if output looks weird, it is a problem with tifffile
    cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1],
                                                       gSig=gSig[0],
                                                       swap_dim=False)
    # if your images file is too long this computation will take an unnecessarily
    # long time and consume a lot of memory. Consider changing images[::1] to
    # images[::5] or something similar to compute on a subset of the data

    # inspect the summary images and set the parameters
    inspect_correlation_pnr(cn_filter, pnr)
    # print parameters set above, modify them if necessary based on summary images
    print(min_corr)  # min correlation of peak (from correlation image)
    print(min_pnr)  # min peak to noise ratio

    # %% RUN CNMF ON PATCHES
    cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts)
    cnm.fit(images)

    # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #    cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #    cnm1.fit_file(motion_correct=True)

    # %% DISCARD LOW QUALITY COMPONENTS
    min_SNR = 2.5  # adaptive way to set threshold on the transient size
    r_values_min = 0.85  # threshold on space consistency (if you lower it, more components
    #                        will be accepted, potentially with worse quality)
    cnm.params.set('quality', {
        'min_SNR': min_SNR,
        'rval_thr': r_values_min,
        'use_cnn': False
    })
    cnm.estimates.evaluate_components(images, cnm.params, dview=dview)

    print(' ***** ')
    print('Number of total components: ', len(cnm.estimates.C))
    print('Number of accepted components: ', len(cnm.estimates.idx_components))

    # %% PLOT COMPONENTS
    cnm.dims = dims
    display_images = True  # Set to true to show movies and images
    if display_images:
        cnm.estimates.plot_contours(img=cn_filter,
                                    idx=cnm.estimates.idx_components)
        cnm.estimates.view_components(images, idx=cnm.estimates.idx_components)

# %% MOVIES
    display_images = False  # Set to true to show movies and images
    if display_images:
        # fully reconstructed movie
        cnm.estimates.play_movie(images,
                                 q_max=99.5,
                                 magnification=2,
                                 include_bck=True,
                                 gain_res=10,
                                 bpx=bord_px)
        # movie without background
        cnm.estimates.play_movie(images,
                                 q_max=99.9,
                                 magnification=2,
                                 include_bck=False,
                                 gain_res=4,
                                 bpx=bord_px)

# %% STOP SERVER
    cm.stop_server(dview=dview)
Example #22
def perform_cnmf(video, params, roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints, use_multiprocessing=True):
    if use_multiprocessing:
        backend = 'multiprocessing'

        cm.stop_server()
        c, dview, n_processes = cm.cluster.setup_cluster(backend=backend, n_processes=None, single_thread=False)
    else:
        dview = None

    # print("~")
    # print(roi_spatial_footprints.shape)

    video_path = "video_temp.tif"
    tifffile.imsave(video_path, video)

    # dataset dependent parameters
    fnames         = [video_path]          # filename to be processed
    fr             = params['imaging_fps'] # imaging rate in frames per second
    decay_time     = params['decay_time']  # length of a typical transient in seconds
    
    # parameters for source extraction and deconvolution
    p              = params['autoregressive_order']             # order of the autoregressive system
    gnb            = params['num_bg_components']                # number of global background components
    merge_thresh   = params['merge_threshold']                  # merging threshold, max correlation allowed
    rf             = None                                       # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf    = 6                                          # amount of overlap between the patches in pixels
    K              = roi_spatial_footprints.shape[1]                   # number of components per patch
    gSig           = [params['half_size'], params['half_size']] # expected half size of neurons
    init_method    = 'greedy_roi'                               # initialization method (if analyzing dendritic data using 'sparse_nmf')
    rolling_sum    = True
    rolling_length = 50
    is_dendrites   = False                                      # flag for analyzing dendritic data
    alpha_snmf     = None                                       # sparsity penalty for dendritic data analysis through sparse NMF

    # parameters for component evaluation
    min_SNR        = params['min_snr']          # signal to noise ratio for accepting a component
    rval_thr       = params['min_spatial_corr'] # space correlation threshold for accepting a component
    cnn_thr        = params['cnn_threshold']    # threshold for CNN based classifier

    border_pix = 0

    fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C') # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')


    cnm = cnmf.CNMF(n_processes=8, k=K, gSig=gSig, merge_thresh=merge_thresh,
                    p=p, dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                    method_init=init_method, alpha_snmf=alpha_snmf, rolling_sum=rolling_sum,
                    only_init_patch=True, skip_refinement=True, gnb=gnb, border_pix=border_pix,
                    ssub=1, ssub_B=1, tsub=1, Ain=roi_spatial_footprints,
                    Cin=roi_temporal_footprints, b_in=bg_spatial_footprints,
                    f_in=bg_temporal_footprints, do_merge=False)

    cnm = cnm.fit(images)

    roi_spatial_footprints  = cnm.A
    roi_temporal_footprints = cnm.C
    roi_temporal_residuals  = cnm.YrA
    bg_spatial_footprints   = cnm.b
    bg_temporal_footprints  = cnm.f

    if use_multiprocessing:
        if backend == 'multiprocessing':
            dview.close()
        else:
            try:
                dview.terminate()
            except:
                dview.shutdown()
        cm.stop_server()

    return roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints
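
# A hypothetical call to perform_cnmf (every value below is an assumption for illustration;
# the dictionary keys are exactly the ones read from `params` inside the function, and the
# footprint/residual arrays are assumed to come from an earlier CNMF run on the same video):
params = {'imaging_fps': 30, 'decay_time': 0.4, 'autoregressive_order': 1,
          'num_bg_components': 2, 'merge_threshold': 0.8, 'half_size': 4,
          'min_snr': 2.5, 'min_spatial_corr': 0.8, 'cnn_threshold': 0.9}
(A, C, YrA, b, f) = perform_cnmf(video, params, A_prev, C_prev, YrA_prev,
                                 b_prev, f_prev, use_multiprocessing=False)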
Example #23
def find_rois_cnmf(video_path, params, mc_borders=None, use_multiprocessing=True):
    full_video_path = video_path

    directory = os.path.dirname(full_video_path)
    filename  = os.path.basename(full_video_path)

    memmap_video = tifffile.memmap(video_path)
    print(memmap_video.shape)

    if len(memmap_video.shape) == 5:
        num_z = memmap_video.shape[2]
    else:
        num_z = memmap_video.shape[1]

    roi_spatial_footprints  = [ None for i in range(num_z) ]
    roi_temporal_footprints = [ None for i in range(num_z) ]
    roi_temporal_residuals  = [ None for i in range(num_z) ]
    bg_spatial_footprints   = [ None for i in range(num_z) ]
    bg_temporal_footprints  = [ None for i in range(num_z) ]

    # Create the cluster
    if use_multiprocessing:
        if os.name == 'nt':
            backend = 'multiprocessing'
        else:
            backend = 'ipyparallel'

        cm.stop_server()
        c, dview, n_processes = cm.cluster.setup_cluster(backend=backend, n_processes=None, single_thread=False)
    else:
        dview = None

    for z in range(num_z):
        fname = os.path.splitext(filename)[0] + "_masked_z_{}.tif".format(z)

        video_path = os.path.join(directory, fname)

        if len(memmap_video.shape) == 5:
            tifffile.imsave(video_path, memmap_video[:, :, z, :, :].reshape((-1, memmap_video.shape[3], memmap_video.shape[4])))
        else:
            tifffile.imsave(video_path, memmap_video[:, z, :, :])

        # dataset dependent parameters
        fnames         = [video_path]          # filename to be processed
        fr             = params['imaging_fps'] # imaging rate in frames per second
        decay_time     = params['decay_time']  # length of a typical transient in seconds
        
        # parameters for source extraction and deconvolution
        p              = params['autoregressive_order']             # order of the autoregressive system
        gnb            = params['num_bg_components']                # number of global background components
        merge_thresh   = params['merge_threshold']                  # merging threshold, max correlation allowed
        rf             = None                                       # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
        stride_cnmf    = 6                                          # amount of overlap between the patches in pixels
        K              = params['num_components']                   # number of components per patch
        gSig           = [params['half_size'], params['half_size']] # expected half size of neurons
        init_method    = 'greedy_roi'                               # initialization method (use 'sparse_nmf' for dendritic data)
        rolling_sum    = True
        rolling_length = 50
        is_dendrites   = False                                      # flag for analyzing dendritic data
        alpha_snmf     = None                                       # sparsity penalty for dendritic data analysis through sparse NMF

        # parameters for component evaluation
        min_SNR        = params['min_snr']          # signal to noise ratio for accepting a component
        rval_thr       = params['min_spatial_corr'] # space correlation threshold for accepting a component
        cnn_thr        = params['cnn_threshold']    # threshold for CNN based classifier

        if mc_borders is not None:
            border_pix = mc_borders[z]
        else:
            border_pix = 0

        fname_new = cm.save_memmap(fnames, base_name='memmap_z_{}'.format(z), order='C')

        # now load the file
        Yr, dims, T = cm.load_memmap(fname_new)
        d1, d2 = dims
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=merge_thresh,
                        p=p, dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                        method_init=init_method, alpha_snmf=alpha_snmf, rolling_sum=rolling_sum,
                        only_init_patch=False, gnb=gnb, border_pix=border_pix, ssub=1, ssub_B=1, tsub=1)

        cnm = cnm.fit(images)

        try:
            A_in, C_in, b_in, f_in = cnm.A, cnm.C, cnm.b, cnm.f
        except:
            A_in, C_in, b_in, f_in = cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f

        cnm2 = cnmf.CNMF(n_processes=1, k=A_in.shape[-1], gSig=gSig, p=p, dview=dview,
                         merge_thresh=merge_thresh, Ain=A_in, Cin=C_in, b_in=b_in,
                         f_in=f_in, rf=None, stride=None, gnb=gnb,
                         method_deconvolution='oasis', check_nan=True)

        cnm2 = cnm2.fit(images)

        try:
            roi_spatial_footprints[z]  = cnm2.A
            roi_temporal_footprints[z] = cnm2.C
            roi_temporal_residuals[z]  = cnm2.YrA
            bg_spatial_footprints[z]   = cnm2.b
            bg_temporal_footprints[z]  = cnm2.f
        except:
            roi_spatial_footprints[z]  = cnm2.estimates.A
            roi_temporal_footprints[z] = cnm2.estimates.C
            roi_temporal_residuals[z]  = cnm2.estimates.YrA
            bg_spatial_footprints[z]   = cnm2.estimates.b
            bg_temporal_footprints[z]  = cnm2.estimates.f

        if os.path.exists(video_path):
            os.remove(video_path)

    del memmap_video

    if use_multiprocessing:
        if backend == 'multiprocessing':
            dview.close()
        else:
            try:
                dview.terminate()
            except:
                dview.shutdown()
        cm.stop_server()

    mmap_files = glob.glob(os.path.join(directory, "*.mmap"))
    for mmap_file in mmap_files:
        try:
            os.remove(mmap_file)
        except:
            pass

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    return roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints
Example #24
#%% Run piecewise-rigid motion correction using NoRMCorre
mc.motion_correct_rigid(save_movie=True)
#%%
m_els = cm.load(mc.fname_tot_rig)
bord_px_els = np.max(np.ceil(np.abs(mc.shifts_rig))).astype(int)
# maximum shift to be used for trimming against NaNs
#%% compare with original movie
cm.concatenate([m_orig.resize(1, 1, downsample_ratio) + offset_mov,
                m_els.resize(1, 1, downsample_ratio)],
               axis=2).play(fr=60, gain=1, magnification=1, offset=0)  # press q to exit

#%% MEMORY MAPPING
# memory map the file in order 'C'
fnames = mc.fname_tot_rig   # name of the pw-rigidly corrected file.
border_to_0 = bord_px_els     # number of pixels to exclude
fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C',
                           border_to_0=border_to_0)  # exclude borders

# now load the file
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
# load frames in python format (T x X x Y)

#%% restart cluster to clean up memory
dview.terminate()
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
#%%
Cn = cm.local_correlations(images.transpose(1, 2, 0))
Cn[np.isnan(Cn)] = 0
plt.figure()
if do_motion_correction_rigid:  # assumed guard; the if-header is missing from this fragment
    new_templ = mc.total_template_rig

    plt.subplot(1, 2, 1)
    plt.imshow(new_templ)  # plot template
    plt.subplot(1, 2, 2)
    plt.plot(mc.shifts_rig)  # plot rigid shifts
    plt.legend(['x shifts', 'y shifts'])
    plt.xlabel('frames')
    plt.ylabel('pixels')

    # borders to eliminate from the movie because of motion correction
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)

    # transform the memory-mapped file to C order (efficient for computing)
    fname_new = cm.save_memmap(
        [mc.fname_tot_rig], base_name='memmap_', order='C')
else:
    # create a memory-mappable file
    fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C')

# load memory mappable file
Yr, dims, T = cm.load_memmap(fname_new)
Y = Yr.T.reshape((T, ) + dims, order='F')
#%% compute some summary images (correlation and peak to noise)
cn_filter, pnr = cm.summary_images.correlation_pnr(
    Y, gSig=gSig, swap_dim=False
)  # change swap_dim if the output looks weird; it is a problem with tifffile
#%% inspect the summary images and set the parameters
inspect_correlation_pnr(cn_filter, pnr)
#%%
dview.terminate()
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=10,
                                                 single_thread=False)

Cn = correlation_image_ecobost(names_tots, dview=dview)

# maximum shift to be used for trimming against NaNs
#%% MEMORY MAPPING
# memory map the file in order 'C'
fnames = names_tots  # name of the pw-rigidly corrected file.
border_to_0 = 6  # number of pixels to exclude
fname_new = cm.save_memmap(fnames,
                           base_name='memmap_',
                           order='C',
                           border_to_0=border_to_0,
                           dview=dview)  # exclude borders

#%% now load the file
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
# load frames in python format (T x X x Y)

#%% restart cluster to clean up memory
dview.terminate()
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%%
    """
    General parameters
    """
    play_movie = 1
    plot_extras = 1
    plot_extras_cell = 1
    compute_mc_metrics = 1

    #%% Select file(s) to be processed (download if not present)
    """
    Load file
    """

    #fnames = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    #if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
    #    fnames = [download_demo(fnames[0])]

    #fnames = ['/home/yuriy/Desktop/Data/rest1_5_9_19_cut.tif']

    #f_dir = 'C:\\Users\\rylab_dataPC\\Desktop\\Yuriy\\caiman_data\\short\\'
    f_dir = 'G:\\analysis\\190828-calcium_voltage\\soma_dendrites\\pCAG_jREGECO1a_ASAP3_anesth_001\\'
    f_name = 'Ch1'
    f_ext = 'tif'
    fnames = [f_dir + f_name + '.' + f_ext]

    #fnames = ['C:/Users/rylab_dataPC/Desktop/Yuriy/caiman_data/rest1_5_9_19_2_cut_ca.hdf5']

    #%% First setup some parameters for data and motion correction
    """
    Parameters
    """

    # dataset dependent parameters
    fr = 30  # imaging rate in frames per second
    decay_time = 1  # length of a typical transient in seconds (demo default: 0.4)
    dxy = (2., 2.)  # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)  # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um

    # motion correction parameters
    pw_rigid = True  # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    #max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
    max_shifts = [6, 6]
    # start a new patch for pw-rigid motion correction every x pixels
    #strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
    strides = [48, 48]
    # overlap between patches (size of patch in pixels: strides+overlaps)
    overlaps = (24, 24)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'copy'
    }

    opts = params.CNMFParams(params_dict=mc_dict)

    # %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the video press q

    if play_movie:
        m_orig = cm.load_movie_chain(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

    # %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # note that the file is not loaded in memory

    # %% Run (piecewise-rigid motion) correction using NoRMCorre
    mc.motion_correct(save_movie=True)

    # type "mc."and press TAB to see all interesting associated variables and self. outputs
    # interesting outputs
    # saved file is mc.fname_tot_els / mc.fname_tot_rig
    # mc.x_shifts_els / mc.y_shifts_els: shifts in x/y per frame per patch
    # mc.coord_shifts_els: coordinates associated to patches with shifts
    # mc.total_template_els: updated template for pw
    # mc.total_template_rig: updated template for rigid
    # mc.templates_rig: templates for each iteration in rig
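
    # A minimal sketch (not in the original script) of inspecting the pw-rigid
    # shift fields listed above; np.array(mc.x_shifts_els) is assumed to have
    # shape (n_frames, n_patches), so each trace below is one patch.
    if plot_extras:
        plt.figure()
        plt.plot(np.array(mc.x_shifts_els), alpha=0.5)
        plt.title('pw-rigid x shifts per patch')
        plt.xlabel('frames')
        plt.ylabel('pixels')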

    #%% # compute metrics for the results (TAKES TIME!!)
    if compute_mc_metrics:
        # not finished
        bord_px_els = np.ceil(
            np.maximum(np.max(np.abs(mc.x_shifts_els)),
                       np.max(np.abs(mc.y_shifts_els)))).astype(int)

        final_size = np.subtract(
            mc.total_template_els.shape,
            2 * bord_px_els)  # remove pixels in the boundaries
        winsize = 100
        swap_dim = False
        resize_fact_flow = .2  # downsample for computing ROF

        tmpl_rig, correlations_orig, flows_orig, norms_orig, crispness_orig = cm.motion_correction.compute_metrics_motion_correction(
            fnames[0],
            final_size[0],
            final_size[1],
            swap_dim,
            winsize=winsize,
            play_flow=False,
            resize_fact_flow=resize_fact_flow)

        plt.figure()
        plt.plot(correlations_orig)

    # %% compare with original movie
    if play_movie:
        m_orig = cm.load_movie_chain(fnames)
        m_els = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([
            m_orig.resize(1, 1, ds_ratio) - mc.min_mov * mc.nonneg_movie,
            m_els.resize(1, 1, ds_ratio)
        ],
                                     axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit
        del m_orig
        del m_els

    if plot_extras:
        # plot total template
        plt.figure()
        plt.imshow(mc.total_template_els)
        plt.title('Template after iteration')
        # plot x and y corrections
        plt.figure()
        plt.plot(mc.shifts_rig)
        plt.title('Rigid motion correction xy movement')
        plt.legend(['x shift', 'y shift'])
        plt.xlabel('frames')

    # %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file,
                               base_name='memmap_',
                               order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

    # %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%  parameters for source extraction and deconvolution

    p = 2  # order of the autoregressive system
    gnb = 2  # number of global background components
    merge_thr = 0.85  # merging threshold, max correlation allowed
    rf = 15  # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf = 6  # amount of overlap between the patches in pixels
    K = 2  # number of components per patch
    gSig = [15, 15]  # expected half size of neurons in pixels
    # initialization method (use 'sparse_nmf' if analyzing dendritic data)
    method_init = 'greedy_roi'
    ssub = 1  # spatial subsampling during initialization
    tsub = 1  # temporal subsampling during initialization

    # parameters for component evaluation
    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'nb': gnb,
        'rf': rf,
        'K': K,
        'gSig': gSig,
        'stride': stride_cnmf,
        'method_init': method_init,
        'rolling_sum': True,
        'merge_thr': merge_thr,
        'n_processes': n_processes,
        'only_init': True,
        'ssub': ssub,
        'tsub': tsub
    }

    opts.change_params(params_dict=opts_dict)
    # %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0)

    opts.change_params({'p': 0})
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

    if plot_extras_cell:
        num_cell_plot = 51
        plt.figure()
        plt.plot(cnm.estimates.C[num_cell_plot, :])
        plt.title('Temporal component')
        plt.legend(['Cell ' + str(num_cell_plot)])
        # plot component spatial profile A
        # first convert back to dense components
        plot_spat_A = cnm.estimates.A[:, num_cell_plot].toarray().reshape(
            list(dims))
        plt.figure()
        plt.imshow(plot_spat_A)
        plt.title('Spatial component cell ' + str(num_cell_plot))

    # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)

    # %% plot contours of found components
    Cn = cm.local_correlations(images, swap_dim=False)
    Cn[np.isnan(Cn)] = 0
    cnm.estimates.plot_contours(img=Cn)
    plt.title('Contour plots of found components')

    if plot_extras:
        plt.figure()
        plt.imshow(Cn)
        plt.title('Local correlations')

    # %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm.params.change_params({'p': p})
    cnm2 = cnm.refit(images, dview=dview)
    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2  # signal to noise ratio for accepting a component
    rval_thr = 0.90  # space correlation threshold for accepting a component
    cnn_thr = 0.99  # threshold for CNN based classifier
    cnn_lowest = 0.1  # neurons with cnn probability lower than this value are rejected

    cnm2.params.set(
        'quality', {
            'decay_time': decay_time,
            'min_SNR': min_SNR,
            'rval_thr': rval_thr,
            'use_cnn': True,
            'min_cnn_thr': cnn_thr,
            'cnn_lowest': cnn_lowest
        })
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)
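
    # Quick report (a sketch, assuming evaluate_components filled the standard
    # idx_components / idx_components_bad fields): how many components passed
    # the three criteria described above.
    print('accepted components:', len(cnm2.estimates.idx_components))
    print('rejected components:', len(cnm2.estimates.idx_components_bad))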

    # %% PLOT COMPONENTS
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
    plt.suptitle('Component selection: min_SNR=' + str(min_SNR) +
                 '; rval_thr=' + str(rval_thr) + '; cnn prob range=[' +
                 str(cnn_lowest) + ' ' + str(cnn_thr) + ']')

    # %% VIEW TRACES (accepted and rejected)

    if plot_extras:
        cnm2.estimates.view_components(images,
                                       img=Cn,
                                       idx=cnm2.estimates.idx_components)
        plt.suptitle('Accepted')
        cnm2.estimates.view_components(images,
                                       img=Cn,
                                       idx=cnm2.estimates.idx_components_bad)
        plt.suptitle('Rejected')

    #plt.figure()
    #plt.plot(cnm2.estimates.YrA[0,:]+cnm2.estimates.C[0,:])

    #plt.figure()
    #plt.plot(cnm2.estimates.R[0,:]-cnm2.estimates.YrA[0,:])
    #plt.plot()
    #plt.show()

    #plt.figure()
    #plt.plot(cnm2.estimates.detrend_df_f[1,:])

    # these store the good and bad components, and next step sorts them
    # cnm2.estimates.idx_components
    # cnm2.estimates.idx_components_bad

    #%% update object with selected components
    #cnm2.estimates.select_components(use_object=True)
    #%% Extract DF/F values
    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)
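
    # Sketch (assumption: detrend_df_f stores its output in estimates.F_dff,
    # one row per component): look at the dF/F trace of the first component.
    if plot_extras:
        plt.figure()
        plt.plot(cnm2.estimates.F_dff[0, :])
        plt.title('dF/F of component 0')
        plt.xlabel('frames')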

    #%% Show final traces

    cnm2.estimates.view_components(img=Cn)
    plt.suptitle("Final results")

    #%% Save the mc data as in cmn struct as well

    ##
    #mc_out = dict(
    #            pw_rigid            = mc.pw_rigid,
    #            fname               = mc.fname,
    #            mmap_file           = mc.mmap_file,
    #            total_template_els  = mc.total_template_els,
    #            total_template_rig  = mc.total_template_rig,
    #            border_nan          = mc.border_nan,
    #            border_to_0         = mc.border_to_0,
    #            x_shifts_els        = mc.x_shifts_els,
    #            y_shifts_els        = mc.y_shifts_els,
    #            Cn                  = Cn
    #            )
    #
    #
    #deepdish.io.save(fnames[0] + '_mc_data.hdf5', mc_out)

    #%% reconstruct denoised movie (press q to exit)
    if play_movie:
        cnm2.estimates.play_movie(images,
                                  q_max=99.9,
                                  gain_res=2,
                                  magnification=2,
                                  bpx=border_to_0,
                                  include_bck=False)  # background not shown

    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    save_results = True
    if save_results:
        cnm2.save(fnames[0][:-4] + '_results.hdf5')
Example #28
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% First setup some parameters

    # dataset dependent parameters
    display_images = False  # Set to true to show movies and images
    fnames = ['data_endoscope.tif']  # filename to be processed
    frate = 10  # movie frame rate
    decay_time = 0.4  # length of a typical transient in seconds

    # motion correction parameters
    do_motion_correction_nonrigid = True
    do_motion_correction_rigid = False  # in this case it will also save a rigid motion corrected movie
    gSig_filt = (3, 3)  # size of filter, in general gSig (see below),
    #                      change this one if algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    splits_rig = 10  # for parallelization split the movies in num_splits chunks across time
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (size of patch strides+overlaps)
    # for parallelization split the movies in num_splits chunks across time
    # (remember that it should hold that length_movie/num_splits_to_process_rig>100)
    splits_els = 10
    upsample_factor_grid = 4  # upsample factor to avoid smearing when merging patches
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    # parameters for source extraction and deconvolution
    p = 1  # order of the autoregressive system
    K = None  # upper bound on number of components per patch, in general None
    gSig = 3  # gaussian width of a 2D gaussian kernel, which approximates a neuron
    gSiz = 13  # average diameter of a neuron, in general 4*gSig+1
    merge_thresh = .7  # merging threshold, max correlation allowed
    rf = 40  # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
    stride_cnmf = 20  # amount of overlap between the patches in pixels
    #                     (keep it at least as large as gSiz, i.e. 4 times the neuron size gSig)
    tsub = 2  # downsampling factor in time for initialization,
    #                     increase if you have memory problems
    ssub = 1  # downsampling factor in space for initialization,
    #                     increase if you have memory problems
    Ain = None  # if you want to initialize with some preselected components
    #                     you can pass them here as boolean vectors
    low_rank_background = None  # None leaves background of each patch intact,
    #                             True performs global low-rank approximation
    gnb = -1  # number of background components (rank) if positive,
    #                     else exact ring model with following settings
    #                         gnb=-2: Return background as b and W
    #                         gnb=-1: Return full rank background B
    #                         gnb= 0: Don't return background
    nb_patch = -1  # number of background components (rank) per patch,
    #                     use 0 or -1 for exact background of ring model (cf. gnb)
    min_corr = .8  # min peak value from correlation image
    min_pnr = 10  # min peak-to-noise ratio from PNR image
    ssub_B = 2  # additional downsampling factor in space for background
    ring_size_factor = 1.4  # radius of ring is gSiz*ring_size_factor

    # parameters for component evaluation
    min_SNR = 3  # adaptive way to set threshold on the transient size
    r_values_min = 0.85  # threshold on space consistency (if you lower it, more components
    #                        will be accepted, potentially with worse quality)

    #%% start the cluster
    try:
        cm.stop_server(dview=dview)  # stop it if it was running
    except:
        pass

    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local',  # use this one
        n_processes=24,  # number of processes to use; if you run out of memory, try reducing this
        single_thread=False)

    #%% download demo file
    fnames = [download_demo(fnames[0])]
    filename_reorder = fnames

    #%% MOTION CORRECTION
    if do_motion_correction_nonrigid or do_motion_correction_rigid:
        # do motion correction rigid
        mc = motion_correct_oneP_rigid(
            fnames,
            gSig_filt=gSig_filt,
            max_shifts=max_shifts,
            dview=dview,
            splits_rig=splits_rig,
            save_movie=not (do_motion_correction_nonrigid),
            border_nan='copy')

        new_templ = mc.total_template_rig

        plt.subplot(1, 2, 1)
        plt.imshow(new_templ)  # % plot template
        plt.subplot(1, 2, 2)
        plt.plot(mc.shifts_rig)  # % plot rigid shifts
        plt.legend(['x shifts', 'y shifts'])
        plt.xlabel('frames')
        plt.ylabel('pixels')

        # borders to eliminate from movie because of motion correction
        bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
        filename_reorder = mc.fname_tot_rig

        # do motion correction nonrigid
        if do_motion_correction_nonrigid:
            mc = motion_correct_oneP_nonrigid(
                fnames,
                gSig_filt=gSig_filt,
                max_shifts=max_shifts,
                strides=strides,
                overlaps=overlaps,
                splits_els=splits_els,
                upsample_factor_grid=upsample_factor_grid,
                max_deviation_rigid=max_deviation_rigid,
                dview=dview,
                splits_rig=None,
                save_movie=True,  # whether to save movie in memory mapped format
                new_templ=new_templ,  # template to initialize motion correction
                border_nan='copy')

            filename_reorder = mc.fname_tot_els
            bord_px = np.ceil(
                np.maximum(np.max(np.abs(mc.x_shifts_els)),
                           np.max(np.abs(mc.y_shifts_els)))).astype(int)

    # create memory mappable file in the right order on the hard drive (C order)
    fname_new = cm.save_memmap(filename_reorder,
                               base_name='memmap_',
                               order='C',
                               border_to_0=bord_px,
                               dview=dview)

    # load memory mappable file
    Yr, dims, T = cm.load_memmap(fname_new)
    Y = Yr.T.reshape((T, ) + dims, order='F')
    #%% compute some summary images (correlation and peak to noise)
    # change swap_dim if the output looks weird; it is a problem with tifffile
    cn_filter, pnr = cm.summary_images.correlation_pnr(Y,
                                                       gSig=gSig,
                                                       swap_dim=False)
    # inspect the summary images and set the parameters
    inspect_correlation_pnr(cn_filter, pnr)
    # print parameters set above, modify them if necessary based on summary images
    print(min_corr)  # min correlation of peak (from correlation image)
    print(min_pnr)  # min peak to noise ratio
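
    # A rough, optional heuristic (not part of the demo): compare the hand-set
    # thresholds with high percentiles of the summary images before committing.
    suggested_min_corr = float(np.nanpercentile(cn_filter, 95))  # hypothetical helper value
    suggested_min_pnr = float(np.nanpercentile(pnr, 95))         # hypothetical helper value
    print('95th percentile of correlation image:', round(suggested_min_corr, 2))
    print('95th percentile of PNR image:', round(suggested_min_pnr, 2))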

    #%% RUN CNMF ON PATCHES
    cnm = cnmf.CNMF(
        n_processes=n_processes,
        method_init='corr_pnr',  # use this for 1 photon
        k=K,
        gSig=(gSig, gSig),
        gSiz=(gSiz, gSiz),
        merge_thresh=merge_thresh,
        p=p,
        dview=dview,
        tsub=tsub,
        ssub=ssub,
        Ain=Ain,
        rf=rf,
        stride=stride_cnmf,
        only_init_patch=True,  # just leave it as is
        gnb=gnb,
        nb_patch=nb_patch,
        method_deconvolution='oasis',  # could use 'cvxpy' alternatively
        low_rank_background=low_rank_background,
        update_background_components=True,  # sometimes setting this to False improves the results
        min_corr=min_corr,
        min_pnr=min_pnr,
        normalize_init=False,  # just leave as is
        center_psf=True,  # leave as is for 1 photon
        ssub_B=ssub_B,
        ring_size_factor=ring_size_factor,
        del_duplicates=True,  # whether to remove duplicates from initialization
        border_pix=bord_px)  # number of pixels to not consider in the borders
    cnm.fit(Y)

    #%% DISCARD LOW QUALITY COMPONENTS
    cnm.evaluate_components(Y,
                            min_SNR=min_SNR,
                            rval_thr=r_values_min,
                            use_cnn=False,
                            decay_time=decay_time,
                            fr=frate)

    print(' ***** ')
    print('Number of total components: ', len(cnm.C))
    print('Number of accepted components: ', len(cnm.idx_components))

    #%% PLOT COMPONENTS
    cnm.dims = dims
    if display_images:
        cnm.plot_contours(img=cn_filter, idx=cnm.idx_components)
        cnm.view_components(Y, dims, idx=cnm.idx_components)

#%% MOVIES
    if display_images:
        # fully reconstructed movie
        cnm.play_movie(Y, magnification=3, include_bck=True, gain_res=10)
        # movie without background
        cnm.play_movie(Y, magnification=3, include_bck=False, gain_res=4)

#%% STOP SERVER
    cm.stop_server(dview=dview)
Example #29
                                    single_thread=False)
        fname_zip = os.path.join(base_folder,
                                 params_movie['fname'].split('/')[0], 'images',
                                 'images.zip')
        mov_names = glob.glob(
            os.path.join(base_folder, params_movie['fname'].split('/')[0],
                         'images', '*.tif'))
        if len(mov_names) > 0:
            mov_names = sorted(mov_names,
                               key=lambda x: int(x.split('_')[-1][:-4]))
        else:
            mov_names = from_zipfiles_to_movie_lists(fname_zip)

        # add_to_mov = cm.load(mov_names[0]).min()
        fname_zip = cm.save_memmap(mov_names,
                                   dview=dview,
                                   order='C',
                                   add_to_movie=0)
        shutil.move(fname_zip,
                    fname_new)  # we get it from the images subfolder

    try:
        cm.stop_server()
        dview.terminate()
    except:
        print('No clusters to stop')

    c, dview, n_processes = setup_cluster(backend=backend_patch,
                                          n_processes=n_processes,
                                          single_thread=False)

    # %% LOAD MEMMAP FILE
Example #30
def main():
    pass  # For compatibility between running under Spyder and the CLI

#%% Select file(s) to be processed (download if not present)
    fnames = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
        fnames = [download_demo(fnames[0])]

#%% First setup some parameters for data and motion correction

    # dataset dependent parameters
    fr = 30             # imaging rate in frames per second
    decay_time = 0.4    # length of a typical transient in seconds
    dxy = (2., 2.)      # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)       # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um

    # motion correction parameters
    pw_rigid = True       # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
    # start a new patch for pw-rigid motion correction every x pixels
    strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
    # overlap between patches (size of patch in pixels: strides+overlaps)
    overlaps = (24, 24)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'copy'
    }

    opts = params.CNMFParams(params_dict=mc_dict)

# %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the video press q
    display_images = True

    if display_images:
        m_orig = cm.load_movie_chain(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

# %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # note that the file is not loaded in memory

# %% Run (piecewise-rigid motion) correction using NoRMCorre
    mc.motion_correct(save_movie=True)

# %% compare with original movie
    if display_images:
        m_orig = cm.load_movie_chain(fnames)
        m_els = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie,
                                      m_els.resize(1, 1, ds_ratio)], axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit

# %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file, base_name='memmap_', order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

# %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%  parameters for source extraction and deconvolution
    p = 1                    # order of the autoregressive system
    gnb = 2                  # number of global background components
    merge_thr = 0.85         # merging threshold, max correlation allowed
    rf = 15
    # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf = 6          # amount of overlap between the patches in pixels
    K = 4                    # number of components per patch
    gSig = [4, 4]            # expected half size of neurons in pixels
    # initialization method (use 'sparse_nmf' if analyzing dendritic data)
    method_init = 'greedy_roi'
    ssub = 2                     # spatial subsampling during initialization
    tsub = 2                     # temporal subsampling during initialization

    # parameters for component evaluation
    opts_dict = {'fnames': fnames,
                 'p': p,
                 'fr': fr,
                 'nb': gnb,
                 'rf': rf,
                 'K': K,
                 'gSig': gSig,
                 'stride': stride_cnmf,
                 'method_init': method_init,
                 'rolling_sum': True,
                 'merge_thr': merge_thr,
                 'n_processes': n_processes,
                 'only_init': True,
                 'ssub': ssub,
                 'tsub': tsub}

    opts.change_params(params_dict=opts_dict)
# %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0). If you want to have
    # deconvolution within each patch change params.patch['p_patch'] to a
    # nonzero value

    #opts.change_params({'p': 0})
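
    # Sketch of the per-patch deconvolution option mentioned above (assumption:
    # the params object exposes 'p_patch' in its patch group); disabled here.
    deconvolve_in_patches = False  # hypothetical flag, not in the original demo
    if deconvolve_in_patches:
        opts.change_params(params_dict={'p_patch': p})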
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

# %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)

# %% plot contours of found components
    Cns = local_correlations_movie_offline(mc.mmap_file[0],
                                           remove_baseline=True, window=1000, stride=1000,
                                           winSize_baseline=100, quantil_min_baseline=10,
                                           dview=dview)
    Cn = Cns.max(axis=0)
    Cn[np.isnan(Cn)] = 0
    cnm.estimates.plot_contours(img=Cn)
    plt.title('Contour plots of found components')
#%% save results
    cnm.estimates.Cn = Cn
    cnm.save(fname_new[:-5]+'_init.hdf5')

# %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm2 = cnm.refit(images, dview=dview)
    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2  # signal to noise ratio for accepting a component
    rval_thr = 0.85  # space correlation threshold for accepting a component
    cnn_thr = 0.99  # threshold for CNN based classifier
    cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected

    cnm2.params.set('quality', {'decay_time': decay_time,
                               'min_SNR': min_SNR,
                               'rval_thr': rval_thr,
                               'use_cnn': True,
                               'min_cnn_thr': cnn_thr,
                               'cnn_lowest': cnn_lowest})
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)
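    # Sketch (assumption: evaluate_components also stores per-component metrics
    # in estimates.SNR_comp and estimates.r_values): summarize the quality
    # measures behind the accept/reject split above.
    print('median component SNR:', np.median(cnm2.estimates.SNR_comp))
    print('median space correlation:', np.median(cnm2.estimates.r_values))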
    # %% PLOT COMPONENTS
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)

    # %% VIEW TRACES (accepted and rejected)

    if display_images:
        cnm2.estimates.view_components(images, img=Cn,
                                      idx=cnm2.estimates.idx_components)
        cnm2.estimates.view_components(images, img=Cn,
                                      idx=cnm2.estimates.idx_components_bad)
    #%% update object with selected components
    cnm2.estimates.select_components(use_object=True)
    #%% Extract DF/F values
    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)

    #%% Show final traces
    cnm2.estimates.view_components(img=Cn)
    #%%
    cnm2.estimates.Cn = Cn
    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5')
    #%% reconstruct denoised movie (press q to exit)
    if display_images:
        cnm2.estimates.play_movie(images, q_max=99.9, gain_res=2,
                                  magnification=2,
                                  bpx=border_to_0,
                                  include_bck=False)  # background not shown

    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Example #31
# cm.movie(Y).play(fr=30, magnification=2)


gSig = 3   # gaussian width of a 2D gaussian kernel, which approximates a neuron
gSiz = 10  # average diameter of a neuron
min_corr = .9
min_pnr = 15
# If True, the background can be roughly removed. This is useful when the background is strong.
center_psf = True
K = 200


#%%
whole_FOV = True
if whole_FOV:
    fname_new = cm.save_memmap([Y], base_name='Yr')
    dims = dims_in
else:

    fname_new = cm.save_memmap([Y], base_name='Yr',
                               idx_xy=(slice(120, 2 * 120), slice(120, 2 * 120)))
    dims = (120, 120)

Yr, dims, T = cm.load_memmap(fname_new)
Y = Yr.T.reshape((T,) + dims, order='F')

cn_filter, pnr = cm.summary_images.correlation_pnr(
    Y, gSig=gSig, center_psf=center_psf, swap_dim=False)
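
# A minimal sketch (not in the original snippet): the two summary images above
# are what min_corr and min_pnr are usually read off from; show them side by side.
import matplotlib.pyplot as plt  # assumed available, as in the other examples

fig, axes = plt.subplots(1, 2)
axes[0].imshow(cn_filter)
axes[0].set_title('correlation image')
axes[1].imshow(pnr)
axes[1].set_title('PNR image')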


#%%
Example #32
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    mc.motion_correct(save_movie=True)
    fname_mc = mc.fname_tot_els if pw_rigid else mc.fname_tot_rig
    if pw_rigid:
        bord_px = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
                                     np.max(np.abs(mc.y_shifts_els)))).astype(int)
    else:
        bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
        plt.subplot(1, 2, 1)
        plt.imshow(mc.total_template_rig)  # plot template
        plt.subplot(1, 2, 2)
        plt.plot(mc.shifts_rig)  # plot rigid shifts
        plt.legend(['x shifts', 'y shifts'])
        plt.xlabel('frames')
        plt.ylabel('pixels')
        plt.savefig(newpath + r'/' + "shift.pdf", edgecolor='w', format='pdf', transparent=True)

    bord_px = 0 if border_nan == 'copy' else bord_px
    fname_new = cm.save_memmap(fname_mc, base_name='memmap_', order='C',
                               border_to_0=bord_px)
else:  # if no motion correction just memory map the file
    fname_new = cm.save_memmap(filename_reorder, base_name='memmap_',
                               order='C', border_to_0=0, dview=dview)

print('Motion correction has been done!')
m_els = cm.load(fname_mc)

# save motion corrected video as mat file

mc_name = os.path.join(newpath,"motioncorrected.tif")
#vid=np.array(m_els).astype('uint8')
#from scipy.io import savemat
#savemat(mc_name,{'vid':vid},format="5",do_compression=True)
m_els.save(mc_name)
m_els.save(os.path.join(newpath,'ms_mc.avi'))
def motion_correct(video_path, max_shift, patch_stride, patch_overlap, use_multiprocessing=True):
    full_video_path = video_path

    directory = os.path.dirname(full_video_path)
    filename  = os.path.basename(full_video_path)

    memmap_video = tifffile.memmap(video_path)

    if use_multiprocessing:
        if os.name == 'nt':
            backend = 'multiprocessing'
        else:
            backend = 'ipyparallel'

        # Create the cluster
        cm.stop_server()
        c, dview, n_processes = cm.cluster.setup_cluster(backend=backend, n_processes=None, single_thread=False)
    else:
        dview = None

    z_range = list(range(memmap_video.shape[1]))

    new_video_path = os.path.join(directory, "mc_video_temp.tif")

    shutil.copyfile(video_path, new_video_path)

    mc_video = tifffile.memmap(new_video_path).astype(np.uint16)

    mc_borders = [ None for z in z_range ]

    counter = 0

    for z in z_range:
        print("Motion correcting plane z={}...".format(z))
        video_path = os.path.join(directory, os.path.splitext(filename)[0] + "_z_{}_temp.tif".format(z))
        tifffile.imsave(video_path, memmap_video[:, z, :, :])

        mc_video[:, z, :, :] *= 0

        # --- PARAMETERS --- #

        params_movie = {'fname': video_path,
                        'max_shifts': (max_shift, max_shift),  # maximum allowed rigid shift
                        'niter_rig': 3,
                        'splits_rig': 1,  # for parallelization split the movies in num_splits chunks across time
                        'num_splits_to_process_rig': None,  # if none all the splits are processed and the movie is saved
                        'strides': (patch_stride, patch_stride),  # intervals at which patches are laid out for motion correction
                        'overlaps': (patch_overlap, patch_overlap),  # overlap between patches (size of patch strides+overlaps)
                        'splits_els': 1,  # for parallelization split the movies in num_splits chunks across time
                        'num_splits_to_process_els': [None],  # if none all the splits are processed and the movie is saved
                        'upsample_factor_grid': 4,  # upsample factor to avoid smearing when merging patches
                        'max_deviation_rigid': 3,  # maximum deviation allowed for patch with respect to rigid shift         
                        }

        # load movie (in memory!)
        fname = params_movie['fname']
        niter_rig = params_movie['niter_rig']
        # maximum allowed rigid shift
        max_shifts = params_movie['max_shifts']  
        # for parallelization split the movies in num_splits chunks across time
        splits_rig = params_movie['splits_rig']  
        # if none all the splits are processed and the movie is saved
        num_splits_to_process_rig = params_movie['num_splits_to_process_rig']
        # intervals at which patches are laid out for motion correction
        strides = params_movie['strides']
        # overlap between patches (size of patch strides+overlaps)
        overlaps = params_movie['overlaps']
        # for parallelization split the movies in num_splits chunks across time
        splits_els = params_movie['splits_els'] 
        # if none all the splits are processed and the movie is saved
        num_splits_to_process_els = params_movie['num_splits_to_process_els']
        # upsample factor to avoid smearing when merging patches
        upsample_factor_grid = params_movie['upsample_factor_grid'] 
        # maximum deviation allowed for patch with respect to rigid
        # shift
        max_deviation_rigid = params_movie['max_deviation_rigid']

        # --- RIGID MOTION CORRECTION --- #

        # Load the original movie
        m_orig = tifffile.memmap(fname)
        # m_orig = cm.load(fname)
        min_mov = np.min(m_orig) # movie must be mostly positive for this to work

        offset_mov = -min_mov

        # Create motion correction object
        mc = MotionCorrect(fname, min_mov,
                           dview=dview, max_shifts=max_shifts, niter_rig=niter_rig,
                           splits_rig=splits_rig,
                           num_splits_to_process_rig=num_splits_to_process_rig,
                           strides=strides, overlaps=overlaps, splits_els=splits_els,
                           num_splits_to_process_els=num_splits_to_process_els,
                           upsample_factor_grid=upsample_factor_grid,
                           max_deviation_rigid=max_deviation_rigid,
                           shifts_opencv=True, nonneg_movie=True, border_nan='min')

        # Do rigid motion correction
        mc.motion_correct_rigid(save_movie=False)

        # --- ELASTIC MOTION CORRECTION --- #

        # Do elastic motion correction
        mc.motion_correct_pwrigid(save_movie=True, template=mc.total_template_rig, show_template=False)

        # # Save elastic shift border
        bord_px_els = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
                                         np.max(np.abs(mc.y_shifts_els)))).astype(int)
        # np.savez(mc.fname_tot_els + "_bord_px_els.npz", bord_px_els)

        fnames = mc.fname_tot_els   # name of the pw-rigidly corrected file.
        border_to_0 = bord_px_els     # number of pixels to exclude
        fname_new = cm.save_memmap(fnames, base_name='memmap_z_{}'.format(z), order='C',
                                   border_to_0=bord_px_els)  # exclude borders

        # now load the file
        Yr, dims, T = cm.load_memmap(fname_new)
        d1, d2 = dims
        images = np.reshape(Yr.T, [T] + list(dims), order='F') 

        mc_borders[z] = bord_px_els

        # images += np.amin(images)

        # print(np.amax(images))
        # print(np.amin(images))
        # print(type(images))

        mc_video[:, z, :, :] = (images - np.amin(images)).astype(np.uint16)

        del m_orig
        os.remove(video_path)

        try:
            os.remove(mc.fname_tot_rig)
            os.remove(mc.fname_tot_els)
        except:
            pass

        counter += 1

    if use_multiprocessing:
        if backend == 'multiprocessing':
            dview.close()
        else:
            try:
                dview.terminate()
            except:
                dview.shutdown()
        cm.stop_server()

    mmap_files = glob.glob(os.path.join(directory, '*.mmap'))
    for mmap_file in mmap_files:
        try:
            os.remove(mmap_file)
        except:
            pass

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    return mc_video, new_video_path, mc_borders