Example #1
def demo(parallel=False):

    # roughly the number of cores on your machine minus 2 (at least 1)
    n_processes = np.maximum(psutil.cpu_count() - 2, 1)
    p = 2  # order of the AR model (in general 1 or 2)
    stop_server()
    if parallel:
        start_server()
        c = Client()

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap(['example_movies/demoMovie.tif'],
                               base_name='Yr')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=30,
                    gSig=[4, 4],
                    merge_thresh=.8,
                    p=p,
                    dview=c[:] if parallel else None,
                    Ain=None,
                    method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        stop_server()

    # verifying the spatial components
    npt.assert_allclose(cnm.A.sum(), 32282000, 1e-3)
    # verifying the temporal components
    npt.assert_allclose(cnm.C.sum(), 640.5, 1e-2)
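
These snippets are reproduced without their module-level imports. A minimal header that would let Example #1 run might look like the sketch below; the exact import paths are an assumption (CaImAn 1.x-era layout) and may differ for your installed version.

# assumed imports for Example #1 (adjust to your CaImAn version)
import numpy as np
import numpy.testing as npt
import psutil

import caiman as cm
from caiman.source_extraction import cnmf
from caiman.cluster import start_server, stop_server
from ipyparallel import Client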
Example #2
def main(n_processes=None, patches=True, rf=64):

    t = -time()

    Yr, dims, T = cm.load_memmap(os.path.abspath('./Yr_d1_253_d2_316_d3_1_order_C_frames_2000_.mmap'))
    Y = Yr.T.reshape((T,) + dims, order='F')

    # c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=n_processes)
    # the line above doesn't work here because memory_profiler creates a multiprocessing object of its own
    if n_processes is None:
        n_processes = cpu_count()
    dview = Pool(n_processes) if patches else None
    print('{0} processes'.format(n_processes))

    patch_args = dict(nb_patch=0, del_duplicates=True, rf=(rf, rf), stride=(16, 16)) \
        if patches else {}

    cnm = cnmf.CNMF(n_processes=n_processes, method_init='corr_pnr', k=None, dview=dview,
                    gSig=(3, 3), gSiz=(10, 10), merge_thresh=.8, p=1, tsub=2, ssub=1,
                    only_init_patch=True, gnb=0, min_corr=.9, min_pnr=15, normalize_init=False,
                    ring_size_factor=1.5, center_psf=True, ssub_B=2, init_iter=1, **patch_args)
    cnm.fit(Y)
    if patches:
        dview.terminate()
    t += time()
    sleep(1)  # just in case Pool takes some time to terminate
    return t
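
A hypothetical way to call the profiling function above and compare the patch-based and whole-FOV settings; it assumes the memory-mapped file hard-coded in main() is present.

if __name__ == '__main__':
    t_patches = main(patches=True)
    t_whole = main(patches=False)
    print('patch-based fit: {:.1f} s, whole-FOV fit: {:.1f} s'.format(t_patches, t_whole))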
Example #3
    def fit_cnmfe(self):
        """
        Do CNMF-E

        """
        self.cnm = cnmf.CNMF(n_processes=self.n_processes,
                             dview=self.dview,
                             Ain=None,
                             params=self.opts)
        self.cnm.fit(self.images)
Example #4
def demo(parallel=False):

    p = 2  # order of the AR model (in general 1 or 2)
    if parallel:
        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)
    else:
        n_processes, dview = 2, None

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap(
        [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')],
        base_name='Yr',
        order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=30,
                    gSig=[4, 4],
                    merge_thresh=.8,
                    p=p,
                    dview=dview,
                    Ain=None,
                    method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        cm.cluster.stop_server(dview=dview)

    # verifying the spatial components
    npt.assert_allclose(cnm.estimates.A.sum(), 281.1, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.estimates.C.sum(), 66271668, 1e-2)
    try:
        dview.terminate()
    except Exception:  # dview is None in the serial case
        pass
Example #5
def pipeline(D):
    #%% GENERATE GROUND TRUTH DATA
    Yr, trueC, trueS, trueA, centers, dims = gen_data(D)
    N, T = trueC.shape
    # INIT
    params = caiman.source_extraction.cnmf.params.CNMFParams(
        dims=dims,
        k=4,
        gSig=[2, 2, 2][:D],
        p=1,
        n_pixels_per_process=np.prod(dims),
        block_size_spat=np.prod(dims),
        block_size_temp=np.prod(dims))
    params.spatial['thr_method'] = 'nrg'
    params.spatial['extract_cc'] = False
    cnm = cnmf.CNMF(2, params=params)
    # FIT
    images = np.reshape(Yr.T, (T, ) + dims, order='F')
    cnm = cnm.fit(images)

    # VERIFY HIGH CORRELATION WITH GROUND TRUTH
    sorting = [
        np.argmax([np.corrcoef(tc, c)[0, 1] for tc in trueC])
        for c in cnm.estimates.C
    ]
    # verifying the temporal components
    corr = [
        np.corrcoef(trueC[sorting[i]], cnm.estimates.C[i])[0, 1]
        for i in range(N)
    ]
    npt.assert_allclose(corr, 1, .05)
    # verifying the spatial components
    corr = [
        np.corrcoef(
            np.reshape(trueA, (-1, 4), order='F')[:, sorting[i]],
            cnm.estimates.A.toarray()[:, i])[0, 1] for i in range(N)
    ]
    npt.assert_allclose(corr, 1, .05)
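
Example #5 is parameterized by the dimensionality D of the synthetic data. A minimal sketch of how a test suite might invoke it for 2D and 3D data follows; the test function names are assumptions, not part of the original.

def test_pipeline_2D():
    pipeline(2)  # planar (d1, d2) ground-truth data

def test_pipeline_3D():
    pipeline(3)  # volumetric (d1, d2, d3) ground-truth data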
Example #6
def demo(parallel=False):

    p = 2  # order of the AR model (in general 1 or 2)
    if parallel:
        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)
    else:
        n_processes, dview = 2, None

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap(
        [os.path.abspath(cm.__path__[0][:-7]) + '/example_movies/demoMovie.tif'],
        base_name='Yr')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=30,
                    gSig=[4, 4],
                    merge_thresh=.8,
                    p=p,
                    dview=dview,
                    Ain=None,
                    method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        cm.cluster.stop_server()

    # verifying the spatial components
    npt.assert_allclose(cnm.A.sum(), 31913160, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.C.sum(), 640.5, 1e-2)
Example #7
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% First setup some parameters

    # dataset dependent parameters
    display_images = False  # Set to true to show movies and images
    fnames = ['data_endoscope.tif']  # filename to be processed
    frate = 10  # movie frame rate
    decay_time = 0.4  # length of a typical transient in seconds

    # motion correction parameters
    do_motion_correction_nonrigid = True
    do_motion_correction_rigid = False  # in this case it will also save a rigid motion corrected movie
    gSig_filt = (3, 3)  # size of filter, in general gSig (see below),
    #                      change this one if algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    splits_rig = 10  # for parallelization split the movie into num_splits chunks across time
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (size of patch = strides + overlaps)
    # for parallelization split the movie into num_splits chunks across time
    # (remember that it should hold that length_movie/num_splits_to_process_rig > 100)
    splits_els = 10
    upsample_factor_grid = 4  # upsample factor to avoid smearing when merging patches
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    # parameters for source extraction and deconvolution
    p = 1  # order of the autoregressive system
    K = None  # upper bound on number of components per patch, in general None
    gSig = 3  # gaussian width of a 2D gaussian kernel, which approximates a neuron
    gSiz = 13  # average diameter of a neuron, in general 4*gSig+1
    merge_thresh = .7  # merging threshold, max correlation allowed
    rf = 40  # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
    stride_cnmf = 20  # amount of overlap between the patches in pixels
    #                     (keep it at least as large as gSiz, i.e. 4 times the neuron size gSig)
    tsub = 2  # downsampling factor in time for initialization,
    #                     increase if you have memory problems
    ssub = 1  # downsampling factor in space for initialization,
    #                     increase if you have memory problems
    Ain = None  # if you want to initialize with some preselected components
    #                     you can pass them here as boolean vectors
    low_rank_background = None  # None leaves background of each patch intact,
    #                             True performs global low-rank approximation
    gnb = -1  # number of background components (rank) if positive,
    #                     else exact ring model with following settings
    #                         gnb=-2: Return background as b and W
    #                         gnb=-1: Return full rank background B
    #                         gnb= 0: Don't return background
    nb_patch = -1  # number of background components (rank) per patch,
    #                     use 0 or -1 for exact background of ring model (cf. gnb)
    min_corr = .8  # min peak value from correlation image
    min_pnr = 10  # min peak-to-noise ratio from PNR image
    ssub_B = 2  # additional downsampling factor in space for background
    ring_size_factor = 1.4  # radius of ring is gSiz*ring_size_factor

    # parameters for component evaluation
    min_SNR = 3  # adaptive way to set threshold on the transient size
    r_values_min = 0.85  # threshold on space consistency (if you lower it, more components
    #                        will be accepted, potentially with worse quality)

    #%% start the cluster
    try:
        cm.stop_server(dview=dview)  # stop it if it was running
    except Exception:
        pass

    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local',  # use this one
        n_processes=24,  # number of processes to use; if you run out of memory try to reduce this
        single_thread=False)

    #%% download demo file
    fnames = [download_demo(fnames[0])]
    filename_reorder = fnames

    #%% MOTION CORRECTION
    if do_motion_correction_nonrigid or do_motion_correction_rigid:
        # do motion correction rigid
        mc = motion_correct_oneP_rigid(
            fnames,
            gSig_filt=gSig_filt,
            max_shifts=max_shifts,
            dview=dview,
            splits_rig=splits_rig,
            save_movie=not (do_motion_correction_nonrigid),
            border_nan='copy')

        new_templ = mc.total_template_rig

        plt.subplot(1, 2, 1)
        plt.imshow(new_templ)  # % plot template
        plt.subplot(1, 2, 2)
        plt.plot(mc.shifts_rig)  # % plot rigid shifts
        plt.legend(['x shifts', 'y shifts'])
        plt.xlabel('frames')
        plt.ylabel('pixels')

        # borders to eliminate from movie because of motion correction
        bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
        filename_reorder = mc.fname_tot_rig

        # do motion correction nonrigid
        if do_motion_correction_nonrigid:
            mc = motion_correct_oneP_nonrigid(
                fnames,
                gSig_filt=gSig_filt,
                max_shifts=max_shifts,
                strides=strides,
                overlaps=overlaps,
                splits_els=splits_els,
                upsample_factor_grid=upsample_factor_grid,
                max_deviation_rigid=max_deviation_rigid,
                dview=dview,
                splits_rig=None,
                save_movie=True,  # whether to save movie in memory mapped format
                new_templ=new_templ,  # template to initialize motion correction
                border_nan='copy')

            filename_reorder = mc.fname_tot_els
            bord_px = np.ceil(
                np.maximum(np.max(np.abs(mc.x_shifts_els)),
                           np.max(np.abs(mc.y_shifts_els)))).astype(int)

    # create memory mappable file in the right order on the hard drive (C order)
    fname_new = cm.save_memmap(filename_reorder,
                               base_name='memmap_',
                               order='C',
                               border_to_0=bord_px,
                               dview=dview)

    # load memory mappable file
    Yr, dims, T = cm.load_memmap(fname_new)
    Y = Yr.T.reshape((T, ) + dims, order='F')
    #%% compute some summary images (correlation and peak to noise)
    # change swap_dim if the output looks weird; it is a problem with tifffile
    cn_filter, pnr = cm.summary_images.correlation_pnr(Y,
                                                       gSig=gSig,
                                                       swap_dim=False)
    # inspect the summary images and set the parameters
    inspect_correlation_pnr(cn_filter, pnr)
    # print parameters set above, modify them if necessary based on summary images
    print(min_corr)  # min correlation of peak (from correlation image)
    print(min_pnr)  # min peak to noise ratio

    #%% RUN CNMF ON PATCHES
    cnm = cnmf.CNMF(
        n_processes=n_processes,
        method_init='corr_pnr',  # use this for 1 photon
        k=K,
        gSig=(gSig, gSig),
        gSiz=(gSiz, gSiz),
        merge_thresh=merge_thresh,
        p=p,
        dview=dview,
        tsub=tsub,
        ssub=ssub,
        Ain=Ain,
        rf=rf,
        stride=stride_cnmf,
        only_init_patch=True,  # just leave it as is
        gnb=gnb,
        nb_patch=nb_patch,
        method_deconvolution='oasis',  # could use 'cvxpy' alternatively
        low_rank_background=low_rank_background,
        update_background_components=True,  # sometimes setting this to False improves the results
        min_corr=min_corr,
        min_pnr=min_pnr,
        normalize_init=False,  # just leave as is
        center_psf=True,  # leave as is for 1 photon
        ssub_B=ssub_B,
        ring_size_factor=ring_size_factor,
        del_duplicates=True,  # whether to remove duplicates from initialization
        border_pix=bord_px)  # number of pixels to not consider in the borders
    cnm.fit(Y)

    #%% DISCARD LOW QUALITY COMPONENTS
    cnm.evaluate_components(Y,
                            min_SNR=min_SNR,
                            rval_thr=r_values_min,
                            use_cnn=False,
                            decay_time=decay_time,
                            fr=frate)

    print(' ***** ')
    print('Number of total components: ', len(cnm.C))
    print('Number of accepted components: ', len(cnm.idx_components))

    #%% PLOT COMPONENTS
    cnm.dims = dims
    if display_images:
        cnm.plot_contours(img=cn_filter, idx=cnm.idx_components)
        cnm.view_components(Y, dims, idx=cnm.idx_components)

#%% MOVIES
    if display_images:
        # fully reconstructed movie
        cnm.play_movie(Y, magnification=3, include_bck=True, gain_res=10)
        # movie without background
        cnm.play_movie(Y, magnification=3, include_bck=False, gain_res=4)

#%% STOP SERVER
    cm.stop_server(dview=dview)
Example #8
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %% start the cluster
    try:
        cm.stop_server()  # stop it if it was running
    except Exception:
        pass

    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local',
        n_processes=24,  # number of processes to use; if you run out of memory try to reduce this
        single_thread=False)

    # %% First setup some parameters for motion correction
    # dataset dependent parameters
    fnames = ['data_endoscope.tif']  # filename to be processed
    fnames = [download_demo(fnames[0])]  # download file if not already present
    filename_reorder = fnames
    fr = 10  # movie frame rate
    decay_time = 0.4  # length of a typical transient in seconds

    # motion correction parameters
    motion_correct = True  # flag for motion correction
    pw_rigid = False  # flag for pw-rigid motion correction

    gSig_filt = (3, 3)  # size of filter, in general gSig (see below),
    #                      change this one if algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (size of patch = strides + overlaps)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3
    border_nan = 'copy'

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = params.CNMFParams(params_dict=mc_dict)

    # %% MOTION CORRECTION
    #  The pw_rigid flag set above determines whether to use rigid or pw-rigid
    #  motion correction
    if motion_correct:
        # do motion correction rigid
        mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
        mc.motion_correct(save_movie=True)
        fname_mc = mc.fname_tot_els if pw_rigid else mc.fname_tot_rig
        if pw_rigid:
            bord_px = np.ceil(
                np.maximum(np.max(np.abs(mc.x_shifts_els)),
                           np.max(np.abs(mc.y_shifts_els)))).astype(int)
        else:
            bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
            plt.subplot(1, 2, 1)
            plt.imshow(mc.total_template_rig)  # % plot template
            plt.subplot(1, 2, 2)
            plt.plot(mc.shifts_rig)  # % plot rigid shifts
            plt.legend(['x shifts', 'y shifts'])
            plt.xlabel('frames')
            plt.ylabel('pixels')

        bord_px = 0 if border_nan == 'copy' else bord_px
        fname_new = cm.save_memmap(fname_mc,
                                   base_name='memmap_',
                                   order='C',
                                   border_to_0=bord_px)
    else:  # if no motion correction just memory map the file
        fname_new = cm.save_memmap(filename_reorder,
                                   base_name='memmap_',
                                   order='C',
                                   border_to_0=0,
                                   dview=dview)

    # load memory mappable file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = Yr.T.reshape((T, ) + dims, order='F')

    # %% Parameters for source extraction and deconvolution (CNMF-E algorithm)

    p = 1  # order of the autoregressive system
    K = None  # upper bound on number of components per patch, in general None for 1p data
    gSig = (3, 3)  # gaussian width of a 2D gaussian kernel, which approximates a neuron
    gSiz = (13, 13)  # average diameter of a neuron, in general 4*gSig+1
    Ain = None  # possibility to seed with predetermined binary masks
    merge_thr = .7  # merging threshold, max correlation allowed
    rf = 40  # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
    stride_cnmf = 20  # amount of overlap between the patches in pixels
    #                     (keep it at least as large as gSiz, i.e. 4 times the neuron size gSig)
    tsub = 2  # downsampling factor in time for initialization,
    #                     increase if you have memory problems
    ssub = 1  # downsampling factor in space for initialization,
    #                     increase if you have memory problems
    low_rank_background = None  # None leaves background of each patch intact,
    #                     True performs global low-rank approximation if gnb>0
    gnb = 0  # number of background components (rank) if positive,
    #                     else exact ring model with following settings
    #                         gnb= 0: Return background as b and W
    #                         gnb=-1: Return full rank background B
    #                         gnb<-1: Don't return background
    nb_patch = 0  # number of background components (rank) per patch if gnb>0,
    #                     else it is set automatically
    min_corr = .8  # min peak value from correlation image
    min_pnr = 10  # min peak-to-noise ratio from PNR image
    ssub_B = 2  # additional downsampling factor in space for background
    ring_size_factor = 1.4  # radius of ring is gSiz*ring_size_factor

    opts.change_params(
        params_dict={
            'dims': dims,
            'method_init': 'corr_pnr',  # use this for 1 photon
            'K': K,
            'gSig': gSig,
            'gSiz': gSiz,
            'merge_thr': merge_thr,
            'p': p,
            'tsub': tsub,
            'ssub': ssub,
            'rf': rf,
            'stride': stride_cnmf,
            'only_init': True,  # set it to True to run CNMF-E
            'nb': gnb,
            'nb_patch': nb_patch,
            'method_deconvolution': 'oasis',  # could use 'cvxpy' alternatively
            'low_rank_background': low_rank_background,
            'update_background_components': True,  # sometimes setting this to False improves the results
            'min_corr': min_corr,
            'min_pnr': min_pnr,
            'normalize_init': False,  # just leave as is
            'center_psf': True,  # leave as is for 1 photon
            'ssub_B': ssub_B,
            'ring_size_factor': ring_size_factor,
            'del_duplicates': True,  # whether to remove duplicates from initialization
            'border_pix': bord_px  # number of pixels to not consider in the borders
        })

    # %% compute some summary images (correlation and peak to noise)
    # change swap_dim if the output looks weird; it is a problem with tifffile
    cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1],
                                                       gSig=gSig[0],
                                                       swap_dim=False)
    # if your images file is too long this computation will take an unnecessarily
    # long time and consume a lot of memory. Consider changing images[::1] to
    # images[::5] or similar to compute on a subset of the data

    # inspect the summary images and set the parameters
    inspect_correlation_pnr(cn_filter, pnr)
    # print parameters set above, modify them if necessary based on summary images
    print(min_corr)  # min correlation of peak (from correlation image)
    print(min_pnr)  # min peak to noise ratio

    # %% RUN CNMF ON PATCHES
    cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts)
    cnm.fit(images)

    # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #    cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #    cnm1.fit_file(motion_correct=True)

    # %% DISCARD LOW QUALITY COMPONENTS
    min_SNR = 2.5  # adaptive way to set threshold on the transient size
    r_values_min = 0.85  # threshold on space consistency (if you lower it, more components
    #                        will be accepted, potentially with worse quality)
    cnm.params.set('quality', {
        'min_SNR': min_SNR,
        'rval_thr': r_values_min,
        'use_cnn': False
    })
    cnm.estimates.evaluate_components(images, cnm.params, dview=dview)

    print(' ***** ')
    print('Number of total components: ', len(cnm.estimates.C))
    print('Number of accepted components: ', len(cnm.estimates.idx_components))

    # %% PLOT COMPONENTS
    cnm.dims = dims
    display_images = True  # Set to true to show movies and images
    if display_images:
        cnm.estimates.plot_contours(img=cn_filter,
                                    idx=cnm.estimates.idx_components)
        cnm.estimates.view_components(images, idx=cnm.estimates.idx_components)

# %% MOVIES
    display_images = False  # Set to true to show movies and images
    if display_images:
        # fully reconstructed movie
        cnm.estimates.play_movie(images,
                                 q_max=99.5,
                                 magnification=2,
                                 include_bck=True,
                                 gain_res=10,
                                 bpx=bord_px)
        # movie without background
        cnm.estimates.play_movie(images,
                                 q_max=99.9,
                                 magnification=2,
                                 include_bck=False,
                                 gain_res=4,
                                 bpx=bord_px)

# %% STOP SERVER
    cm.stop_server(dview=dview)
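
After component evaluation in Example #8, a common follow-up is to keep only the accepted components and persist the model. The sketch below reuses the estimates/save calls already shown in Examples #10 and #11; select_components is assumed to be available in your CaImAn version.

# sketch only: assumes the cnm object from Example #8
cnm.estimates.select_components(use_object=True)  # drop the rejected components
cnm.save('cnmfe_results.hdf5')  # write the full model to HDF5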
Example #9
def initialize_movie(Y,
                     K,
                     gSig,
                     rf,
                     stride,
                     base_name,
                     p=1,
                     merge_thresh=0.95,
                     rval_thr_online=0.9,
                     thresh_fitness_delta_online=-30,
                     thresh_fitness_raw_online=-50,
                     rval_thr_init=.5,
                     thresh_fitness_delta_init=-20,
                     thresh_fitness_raw_init=-20,
                     rval_thr_refine=0.95,
                     thresh_fitness_delta_refine=-100,
                     thresh_fitness_raw_refine=-100,
                     final_frate=10,
                     Npeaks=10,
                     single_thread=True,
                     n_processes=2,  # number of parallel processes used by the CNMF calls below
                     dview=None):

    _, d1, d2 = Y.shape
    dims = (d1, d2)
    Yr = Y.to_2D().T
    # merging threshold, max correlation allowed
    # order of the autoregressive system
    #T = Y.shape[0]
    base_name = base_name + '.mmap'
    fname_new = Y.save(base_name, order='C')
    #%
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T, ), order='F')
    Cn2 = cm.local_correlations(Y)
    #    pl.imshow(Cn2)
    #%
    #% RUN ALGORITHM ON PATCHES
    #    pl.close('all')
    cnm_init = cnmf.CNMF(n_processes,
                         method_init='greedy_roi',
                         k=K,
                         gSig=gSig,
                         merge_thresh=merge_thresh,
                         p=0,
                         dview=dview,
                         Ain=None,
                         rf=rf,
                         stride=stride,
                         method_deconvolution='oasis',
                         skip_refinement=False,
                         normalize_init=False,
                         options_local_NMF=None,
                         minibatch_shape=100,
                         minibatch_suff_stat=5,
                         update_num_comps=True,
                         rval_thr=rval_thr_online,
                         thresh_fitness_delta=thresh_fitness_delta_online,
                         thresh_fitness_raw=thresh_fitness_raw_online,
                         batch_update_suff_stat=True,
                         max_comp_update_shape=5)

    cnm_init = cnm_init.fit(images)
    A_tot = cnm_init.A
    C_tot = cnm_init.C
    YrA_tot = cnm_init.YrA
    b_tot = cnm_init.b
    f_tot = cnm_init.f

    print(('Number of components:' + str(A_tot.shape[-1])))

    #%

    traces = C_tot + YrA_tot
    #        traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
    #        traces_b=np.diff(traces,axis=1)
    fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = evaluate_components(
        Y,
        traces,
        A_tot,
        C_tot,
        b_tot,
        f_tot,
        final_frate,
        remove_baseline=True,
        N=5,
        robust_std=False,
        Athresh=0.1,
        Npeaks=Npeaks,
        thresh_C=0.3)

    idx_components_r = np.where(r_values >= rval_thr_init)[0]
    idx_components_raw = np.where(fitness_raw < thresh_fitness_raw_init)[0]
    idx_components_delta = np.where(
        fitness_delta < thresh_fitness_delta_init)[0]

    idx_components = np.union1d(idx_components_r, idx_components_raw)
    idx_components = np.union1d(idx_components, idx_components_delta)
    idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)

    print(('Keeping ' + str(len(idx_components)) + ' and discarding  ' +
           str(len(idx_components_bad))))

    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    #%
    cnm_refine = cnmf.CNMF(n_processes,
                           method_init='greedy_roi',
                           k=A_tot.shape[-1],
                           gSig=gSig,
                           merge_thresh=merge_thresh,
                           rf=None,
                           stride=None,
                           p=p,
                           dview=dview,
                           Ain=A_tot,
                           Cin=C_tot,
                           f_in=f_tot,
                           method_deconvolution='oasis',
                           skip_refinement=True,
                           normalize_init=False,
                           options_local_NMF=None,
                           minibatch_shape=100,
                           minibatch_suff_stat=5,
                           update_num_comps=True,
                           rval_thr=rval_thr_refine,
                           thresh_fitness_delta=thresh_fitness_delta_refine,
                           thresh_fitness_raw=thresh_fitness_raw_refine,
                           batch_update_suff_stat=True,
                           max_comp_update_shape=5)

    cnm_refine = cnm_refine.fit(images)
    #%
    A, C, b, f, YrA, sn = cnm_refine.A, cnm_refine.C, cnm_refine.b, cnm_refine.f, cnm_refine.YrA, cnm_refine.sn
    #%
    final_frate = 10
    Npeaks = 10
    traces = C + YrA

    fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
        evaluate_components(Y, traces, A, C, b, f, final_frate, remove_baseline=True,
                            N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks,  thresh_C=0.3)

    idx_components_r = np.where(r_values >= rval_thr_refine)[0]
    idx_components_raw = np.where(fitness_raw < thresh_fitness_raw_refine)[0]
    idx_components_delta = np.where(
        fitness_delta < thresh_fitness_delta_refine)[0]

    idx_components = np.union1d(idx_components_r, idx_components_raw)
    idx_components = np.union1d(idx_components, idx_components_delta)
    idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)

    print(' ***** ')
    print((len(traces)))
    print((len(idx_components)))
    #%
    cnm_refine.idx_components = idx_components
    cnm_refine.idx_components_bad = idx_components_bad
    cnm_refine.r_values = r_values
    cnm_refine.fitness_raw = fitness_raw
    cnm_refine.fitness_delta = fitness_delta
    cnm_refine.Cn2 = Cn2

    #%

    #    cnm_init.dview = None
    #    save_object(cnm_init,fls[0][:-4]+ '_DS_' + str(ds)+ '_init.pkl')

    return cnm_refine, Cn2, fname_new
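
A hypothetical call of the helper above; the file name and parameter values are made up for illustration, and Y must be a caiman movie object because to_2D() and save() are used inside.

Y = cm.load('example_movies/demoMovie.tif')  # caiman movie object
cnm_refine, Cn2, fname_new = initialize_movie(
    Y, K=30, gSig=[4, 4], rf=15, stride=5, base_name='demo_init')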
Example #10
def run(work_dir: str, UUID: str, save_temp_files: str):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s"
    )

    start_time = time()

    batch_dir = os.environ['CURR_BATCH_DIR']
    save_temp_files = bool(int(save_temp_files))

    output = {'status': 0, 'output_info': ''}
    n_processes = int(os.environ['_MESMERIZE_N_THREADS'])

    filepath = os.path.join(work_dir, UUID)

    imgpath = f'{filepath}_input.tiff'
    input_params = pickle.load(open(f'{filepath}.params', 'rb'))

    print('******** Creating process pool *********')
    c, dview, n_processes = setup_cluster(backend='local',
                                          n_processes=n_processes,
                                          single_thread=False,
                                          ignore_preexisting=True)

    try:
        if input_params['use_memmap']:
            memmap_uuid = input_params['memmap_uuid']

            memmap_batchdir = glob(
                os.path.join(batch_dir, f'memmap-{memmap_uuid}*.mmap'))

            # Check batch dir
            if len(memmap_batchdir) > 0:
                memmap_path = memmap_batchdir[0]
                print(
                    f'********** Found existing memmap in batch dir: {memmap_path} ********** '
                )

                # copy to work dir
                if not os.path.samefile(batch_dir, work_dir):
                    print('**** Copying memmap to work dir ****')
                    shutil.copy(memmap_path, work_dir)
                    memmap_path = glob(
                        os.path.join(work_dir,
                                     f'memmap-{memmap_uuid}*.mmap'))[0]

            else:
                # remake the memmap with the same UUID so that future batch items can rely on it
                print(
                    '********** Memmap not found, re-making memmap with the same UUID **********'
                )
                memmap_path = cm.save_memmap([imgpath],
                                             base_name=f'memmap-{memmap_uuid}',
                                             is_3D=True,
                                             order='C',
                                             dview=dview)

        else:
            print('********** Making memmap **********')
            memmap_path = cm.save_memmap([imgpath],
                                         base_name=f'memmap-{UUID}',
                                         is_3D=True,
                                         order='C',
                                         dview=dview)

        print(f'Using memmap:\n{memmap_path}')

        print('********** Loading memmap **********')
        Yr, dims, T = cm.load_memmap(memmap_path)
        Y = np.reshape(Yr, dims + (T, ), order='F')

        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        if input_params['use_patches']:
            cnm = cnmf.CNMF(n_processes=n_processes,
                            dview=dview,
                            only_init_patch=True,
                            **input_params['cnmf_kwargs'])

        else:
            cnm = cnmf.CNMF(n_processes,
                            dview=dview,
                            **input_params['cnmf_kwargs'])

        cnm.fit(images)

        print('Number of components:' + str(cnm.estimates.A.shape[-1]))

        cnm.params.change_params(
            params_dict={
                **input_params['eval_kwargs'], 'use_cnn': False
            })

        cnm.estimates.evaluate_components(images, cnm.params, dview=dview)

        if input_params['refit']:
            cnm.params.set('temporal', {'p': input_params['cnmf_kwargs']['p']})
            cnm_ = cnm.refit(images)
        else:
            cnm_ = cnm

        out_filename = f'{UUID}_results.hdf5'
        cnm_.save(out_filename)

        output_files = [out_filename]

        # Save the memmap
        if save_temp_files:
            print("***** Keeping memmap file *****")

            # copy to batch dir if batch_dir != work_dir
            if not os.path.samefile(batch_dir, work_dir):
                print("***** Copying memmap file to batch dir *****")
                shutil.copy(memmap_path, batch_dir)

        # Delete the memmap from the work dir
        if not os.path.samefile(batch_dir, work_dir):
            print("***** Deleting memmap files from work dir *****")
            try:
                os.remove(memmap_path)
            except OSError:
                pass

        output.update({
            'output': UUID,
            'status': 1,
            'output_files': output_files,
            'saved_memmap': save_temp_files
        })

    except Exception as e:
        output.update({'status': 0, 'output_info': traceback.format_exc()})

    cm.stop_server(dview=dview)

    end_time = time()
    processing_time = (end_time - start_time) / 60
    output.update({'processing_time': processing_time})

    json.dump(output, open(filepath + '.out', 'w'))
Example #11
def run(
    file_path,
    n_cpus,
    motion_correct: bool = True,
    quality_control: bool = False,
    mc_settings: dict = {},
    cnmf_settings: dict = {},
    qc_settings: dict = {},
    job_name: str = "job",
    output_directory: str = "",
):
    mkl = os.environ.get("MKL_NUM_THREADS")
    blas = os.environ.get("OPENBLAS_NUM_THREADS")
    vec = os.environ.get("VECLIB_MAXIMUM_THREADS")
    print(f"MKL: {mkl}")
    print(f"blas: {blas}")
    print(f"vec: {vec}")

    # we import the pipeline dependencies at run time so they aren't required for all installs
    import caiman as cm
    from caiman.source_extraction.cnmf import params as params
    from caiman.source_extraction import cnmf

    # print the directory caiman is imported from
    caiman_path = os.path.abspath(cm.__file__)
    print(f"caiman path: {caiman_path}")
    sys.stdout.flush()

    # setup the logger
    logger_file = os.path.join(output_directory, "caiman.log")
    logging.basicConfig(
        format=LOGGER_FORMAT,
        filename=logger_file,
        filemode="w",
        level=logging.DEBUG,
    )

    # if indices to perform mcorr are set, format them
    if "indices" in mc_settings:
        indices = mc_settings["indices"]

        indices_formatted = ()
        for axis_slice in indices:
            start = axis_slice[0]
            stop = axis_slice[1]
            if len(axis_slice) == 3:
                step = axis_slice[2]
            else:
                step = 1
            indices_formatted += (slice(start, stop, step), )
        mc_settings["indices"] = indices_formatted
    # load and update the pipeline settings
    mc_parameters = DEFAULT_MCORR_SETTINGS
    for k, v in mc_settings.items():
        mc_parameters[k] = v
    cnmf_parameters = DEFAULT_CNMF_PARAMETERS
    for k, v in cnmf_settings.items():
        cnmf_parameters[k] = v
    qc_parameters = DEFAULT_QC_PARAMETERS
    for k, v in qc_settings.items():
        qc_parameters[k] = v
    opts = params.CNMFParams(params_dict=mc_parameters)
    opts.change_params(params_dict=cnmf_parameters)
    opts.change_params(params_dict=qc_parameters)

    # get the filenames
    if os.path.isfile(file_path):
        print(file_path)
        fnames = [file_path]
    else:
        file_pattern = os.path.join(file_path, "*.tif*")
        fnames = sorted(glob.glob(file_pattern))
    print(fnames)
    opts.set("data", {"fnames": fnames})

    if n_cpus > 1:
        print("starting server")
        # start the server
        n_proc = np.max([(n_cpus - 1), 1])
        c, dview, n_processes = cm.cluster.setup_cluster(backend="local",
                                                         n_processes=n_proc,
                                                         single_thread=False)
        print(n_processes)
        sleep(30)
    else:
        print("multiprocessing disabled")
        dview = None
        n_processes = 1

    print("starting analysis")
    print(f"perform motion correction: {motion_correct}")
    print(f"perform qc: {quality_control}")
    sys.stdout.flush()
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm_results = cnm.fit_file(motion_correct=motion_correct,
                               include_eval=quality_control)

    print("evaluate components")
    sys.stdout.flush()
    Yr, dims, T = cm.load_memmap(cnm_results.mmap_file)
    images = Yr.T.reshape((T, ) + dims, order="F")
    cnm_results.estimates.evaluate_components(images, cnm.params, dview=dview)
    print("Number of total components: ", len(cnm_results.estimates.C))
    print("Number of accepted components: ",
          len(cnm_results.estimates.idx_components))

    # save the results object
    print("saving results")
    results_filebase = os.path.join(output_directory, job_name)
    cnm_results.save(results_filebase + ".hdf5")

    # if motion correction was performed, save the file
    # we save as hdf5 for better reading performance
    # downstream
    if motion_correct:
        print("saving motion corrected file")
        mcorr_fname = results_filebase + "_mcorr.hdf5"
        dataset_name = cnm_results.params.data["var_name_hdf5"]
        fnames = cnm_results.params.data["fnames"]
        memmap_files = []
        for f in fnames:
            if isinstance(f, bytes):
                f = f.decode()
            base_file = os.path.splitext(f)[0]
            if cnm_results.params.motion["pw_rigid"]:
                memmap_pattern = base_file + "*_els_*"
            else:
                memmap_pattern = base_file + "*_rig_*"
            memmap_files += sorted(glob.glob(memmap_pattern))
        write_hdf5_movie(
            movie_name=mcorr_fname,
            memmap_files=memmap_files,
            frame_shape=cnm_results.dims,
            dataset_name=dataset_name,
            compression="gzip",
        )

    # save the parameters in the same dir as the results
    final_params = cnm.params.to_dict()
    params_file = os.path.join(output_directory, "all_caiman_parameters.pkl")
    with open(params_file, "wb") as fp:
        pickle.dump(final_params, fp)

    print("stopping server")
    cm.stop_server(dview=dview)
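
A hypothetical invocation of the batch runner above; the paths, settings, and CPU count are placeholders for illustration only.

run(
    file_path='/data/session1/',            # directory of .tif files (placeholder)
    n_cpus=8,
    motion_correct=True,
    quality_control=True,
    mc_settings={'pw_rigid': True},         # overrides DEFAULT_MCORR_SETTINGS
    job_name='session1',
    output_directory='/data/session1/caiman_out',
)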
Example #12
cnm = cnmf.CNMF(
    n_processes=n_processes,
    method_init='corr_pnr',  # use this for 1 photon
    k=70,  # neurons per patch
    gSig=(3, 3),  # half size of neuron
    gSiz=(10, 10),  # in general 3*gSig+1
    merge_thresh=.8,  # threshold for merging
    p=1,  # order of autoregressive process to fit
    dview=dview,  # if None it will run on a single thread
    tsub=2,  # downsampling factor in time for initialization, increase if you have memory problems
    ssub=2,  # downsampling factor in space for initialization, increase if you have memory problems
    Ain=None,  # if you want to initialize with some preselected components you can pass them here as boolean vectors
    rf=(40, 40),  # half size of the patch (final patch will be 100x100)
    stride=(20, 20),  # overlap among patches (keep it at least as large as 4 times the neuron size)
    only_init_patch=True,  # just leave it as is
    gnb=16,  # number of background components
    nb_patch=16,  # number of background components per patch
    method_deconvolution='oasis',  # could use 'cvxpy' alternatively
    low_rank_background=True,  # leave as is
    update_background_components=True,  # sometimes setting this to False improves the results
    min_corr=min_corr,  # min peak value from correlation image
    min_pnr=min_pnr,  # min peak-to-noise ratio from PNR image
    normalize_init=False,  # just leave as is
    center_psf=True,  # leave as is for 1 photon
    del_duplicates=True)  # whether to remove duplicates from initialization
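
Example #12 stops at the constructor; fitting and inspecting the result would follow the same pattern as the surrounding examples. A sketch, assuming images is the usual (T, d1, d2) array loaded from a memory-mapped file; in newer CaImAn versions the spatial components live under cnm.estimates.A instead of cnm.A.

cnm = cnm.fit(images)
print('Number of components:', cnm.A.shape[-1])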
Example #13
def process_data(haussio_data,
                 mask=None,
                 p=2,
                 nrois_init=400,
                 roi_iceberg=0.9,
                 merge_unconnected=None):
    if mask is not None:
        raise RuntimeError("mask not supported in cnmf.process_data")

    fn_cnmf = haussio_data.dirname_comp + '_cnmf.mat'
    shapefn = os.path.join(haussio_data.dirname_comp,
                           haussio.THOR_RAW_FN[:-3] + "shape.npy")
    shape = np.load(shapefn)
    if len(shape) == 5:
        d1, d2 = shape[2], shape[3]
    else:
        d1, d2 = shape[1], shape[2]
    fn_mmap = get_mmap_name(haussio_data.dirname_comp + os.path.sep + 'Yr', d1,
                            d2, shape[0])

    tiffs_to_cnmf(haussio_data)
    if os.path.exists(fn_cnmf):
        resdict = loadmat(fn_cnmf)
        if "dFoF" in resdict.keys():
            A2 = resdict["A"]
            C2 = resdict["C"]
            YrA = resdict["YrA"]
            S2 = resdict["S"]
            dFoF = resdict["dFoF"]
            bl2 = resdict["bl"]
            f = resdict["f"]
            images = haussio_data.read_raw().squeeze()
        else:
            dFoF = None
    if not os.path.exists(fn_cnmf) or dFoF is None:
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend='multiprocessing', n_processes=NCPUS, single_thread=False)

        Yr, dims, T = cm.load_memmap(fn_mmap, 'r+')
        d1, d2 = dims
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        fr = 1.0 / haussio_data.dt  # imaging rate in frames per second
        decay_time = 0.4  # length of a typical transient in seconds

        # parameters for source extraction and deconvolution
        bord_px_els = 32  # maximum shift to be used for trimming against NaNs
        p = 1  # order of the autoregressive system
        gnb = 2  # number of global background components
        merge_thresh = 0.8  # merging threshold, max correlation allowed
        rf = int(
            np.round(np.sqrt(d1 * d2) / nrois_init)
        )  # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
        if rf < 16:
            rf = 16
        stride_cnmf = 6  # amount of overlap between the patches in pixels
        npatches = np.round(d1 / (rf * 2) * d2 / (rf * 2))
        K = nrois_init / npatches  # number of components per patch
        if K < 2:
            K = 2
        print(rf, npatches, K)
        gSig = [8, 8]  # expected half size of neurons
        init_method = 'greedy_roi'  # initialization method (if analyzing dendritic data using 'sparse_nmf')
        is_dendrites = False  # flag for analyzing dendritic data
        alpha_snmf = None  # sparsity penalty for dendritic data analysis through sparse NMF

        # parameters for component evaluation
        min_SNR = 2.5  # signal to noise ratio for accepting a component
        rval_thr = 0.8  # space correlation threshold for accepting a component
        cnn_thr = 0.8  # threshold for CNN based classifier

        cnm = caiman_cnmf.CNMF(n_processes=1,
                               k=K,
                               gSig=gSig,
                               merge_thresh=merge_thresh,
                               p=0,
                               dview=dview,
                               rf=rf,
                               stride=stride_cnmf,
                               memory_fact=1,
                               method_init=init_method,
                               alpha_snmf=alpha_snmf,
                               only_init_patch=False,
                               gnb=gnb,
                               border_pix=bord_px_els)
        cnm = cnm.fit(images)

        idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
            estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                             cnm.YrA, fr, decay_time, gSig, dims,
                                             dview = dview, min_SNR=min_SNR,
                                             r_values_min = rval_thr, use_cnn = False,
                                             thresh_cnn_lowest = cnn_thr)
        A_in, C_in, b_in, f_in = cnm.A[:, idx_components], cnm.C[
            idx_components], cnm.b, cnm.f
        cnm2 = caiman_cnmf.CNMF(n_processes=1,
                                k=A_in.shape[-1],
                                gSig=gSig,
                                p=p,
                                dview=dview,
                                merge_thresh=merge_thresh,
                                Ain=A_in,
                                Cin=C_in,
                                b_in=b_in,
                                f_in=f_in,
                                rf=None,
                                stride=None,
                                gnb=gnb,
                                method_deconvolution='oasis',
                                check_nan=True)
        cnm2 = cnm2.fit(images)

        if merge_unconnected is not None:
            idx_merge = []
            for nroi, ca_roi in enumerate(cnm2.C):
                for nroi_compare_counter, ca_roi_compare in enumerate(
                        cnm2.C[nroi + 1:]):
                    nroi_compare = nroi_compare_counter + nroi + 1
                    if nroi_compare not in idx_merge:
                        correls = np.correlate(ca_roi,
                                               ca_roi_compare,
                                               mode='same')
                        correls /= np.sqrt(
                            np.dot(ca_roi, ca_roi) *
                            np.dot(ca_roi_compare, ca_roi_compare))
                        if correls.max() > merge_unconnected:
                            print("Merging ", nroi_compare)
                            idx_merge.append(nroi_compare)
            idx_no_merge = [
                idx for idx in range(cnm2.C.shape[0]) if idx not in idx_merge
            ]
        else:
            idx_no_merge = range(cnm2.C.shape[0])
        A2 = cnm2.A[:, idx_no_merge].tocsc()
        C2 = cnm2.C[idx_no_merge]
        YrA = cnm2.YrA[idx_no_merge]
        S2 = cnm2.S[idx_no_merge]
        dFoF = cnm2.detrend_df_f(frames_window=300)[idx_no_merge]
        # A: spatial components (ROIs)
        # C: denoised [Ca2+]
        # YrA: residuals ("noise", i.e. traces = C+YrA)
        # S: Spikes
        # f: temporal background
        savemat(
            fn_cnmf, {
                "A": A2,
                "C": C2,
                "YrA": YrA,
                "S": S2,
                "dFoF": dFoF,
                "bl": cnm2.b,
                "f": cnm2.f
            })
        dview.terminate()
        cm.stop_server()

    proj_fn = haussio_data.dirname_comp + "_proj.npy"
    if not os.path.exists(proj_fn):
        zproj = utils.zproject(images)
        np.save(proj_fn, zproj)
    else:
        zproj = np.load(proj_fn)

    logfiles = glob.glob("*LOG*")
    for logfile in logfiles:
        try:
            os.unlink(logfile)
        except OSError:
            pass

    polygons = contour(A2, images.shape[1], images.shape[2], thr=roi_iceberg)
    rois = ROIList([sima.ROI.ROI(polygons=poly) for poly in polygons])

    return rois, C2, zproj, S2, images, YrA
Example #14
def run():

    date_recorded = '200306'
    mouse_id = 'M504408'
    resolution = (512, 512)
    channel = 'green'
    data_folder_n = '110_LSNDGCUC_reorged'
    imaging_mode = 'deepscope'  # '2p' or 'deepscope'
    n_process = 6

    # ========================= caiman parameters for boutons ================================================
    # ============ sutter scope, zoom 4, 5 frames online average, 5 frames offline average ===================
    # fr = 2.  # frame rate (Hz)
    # decay_time = 0.5  # approximate length of transient event in seconds
    gSig = (5, 5)  # expected half size of neurons, (8, 8) for soma at zoom 2 on sutter scope
    p = 2  # order of AR indicator dynamics
    # min_SNR = 3  # minimum SNR for accepting new components
    # rval_thr = 0.80  # correlation threshold for new component inclusion
    # ds_factor = 1  # spatial downsampling factor (increases speed but may lose some fine structure)
    # gnb = 2  # number of background components
    # gSig = tuple(np.ceil(np.array(gSig) / ds_factor).astype('int'))  # recompute gSig if downsampling is involved
    mot_corr = False  # flag for online motion correction
    pw_rigid = False  # flag for pw-rigid motion correction (slower but potentially more accurate)
    # max_shifts_online = np.ceil(10. / ds_factor).astype('int')  # maximum allowed shift during motion correction
    # sniper_mode = True  # flag using a CNN to detect new neurons (o/w space correlation is used)
    # init_batch = 200  # number of frames for initialization (presumably from the first file)
    expected_comps = 500  # maximum number of expected components used for memory pre-allocation (exaggerate here)
    # dist_shape_update = True  # flag for updating shapes in a distributed way
    # min_num_trial = 10  # number of candidate components per frame
    K = 10  # initial number of components
    # epochs = 2  # number of passes over the data
    show_movie = False  # show the movie with the results as the data gets processed

    method_init = 'sparse_nmf'
    do_merge = False
    ssub = 1
    tsub = 1
    alpha_snmf = 10e1
    rolling_sum = False
    rf = 256
    p_ssub = 1
    p_tsub = 1
    Ain = None
    # method_deconvolution = 'oasis'
    border_pix = 0
    # ========================= caiman parameters for boutons ================================================
    curr_folder = os.path.dirname(os.path.realpath(__file__))

    # windows
    source_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data"

    # # ubuntu
    # source_folder = "/media/nc-ophys/Jun/raw_data"

    data_folder = os.path.join(
        source_folder, f'{date_recorded}-{mouse_id}-'
        f'{imaging_mode}/{data_folder_n}')

    plane_ns = [
        f for f in os.listdir(data_folder)
        if os.path.isdir(os.path.join(data_folder, f)) and f[:5] == 'plane'
    ]
    plane_ns.sort()
    print('planes:')
    print('\n'.join(plane_ns))

    c, dview, _ = cm.cluster.setup_cluster(backend='local',
                                           n_processes=n_process,
                                           single_thread=False)

    for plane_n in plane_ns:

        print('\nsegmenting plane: {}'.format(plane_n))

        plane_folder = os.path.join(data_folder, plane_n, channel, 'corrected')
        os.chdir(plane_folder)

        fn = [
            f for f in os.listdir(plane_folder)
            if len(f) > 16 and f[-5:] == '.mmap'
        ]
        if len(fn) > 1:
            print('\n'.join(fn))
            raise LookupError('more than one file found.')
        elif len(fn) == 0:
            raise LookupError('no file found.')
        else:
            fn = fn[0]

        fp = os.path.join(os.path.realpath(plane_folder), fn)

        params_dict = {
            'fnames': [fp],
            # 'fr': fr,
            # 'decay_time': decay_time,
            'gSig': gSig,
            'p': p,
            # 'min_SNR': min_SNR,
            # 'rval_thr': rval_thr,
            # 'ds_factor': ds_factor,
            # 'nb': gnb,
            'motion_correct': mot_corr,
            # 'init_batch': init_batch,
            # 'init_method': 'bare',
            # 'normalize': True,
            'expected_comps': expected_comps,
            # 'sniper_mode': sniper_mode,
            # 'dist_shape_update': dist_shape_update,
            # 'min_num_trial': min_num_trial,
            'K': K,
            # 'epochs': epochs,
            # 'max_shifts_online': max_shifts_online,
            'pw_rigid': pw_rigid,
            'show_movie': show_movie,

            # testing parameters
            'method_init': method_init,
            'do_merge': do_merge,
            'ssub': ssub,
            'tsub': tsub,
            'alpha_snmf': alpha_snmf,
            'rolling_sum': rolling_sum,
            'rf': rf,
            'p_ssub': p_ssub,
            'p_tsub': p_tsub,
            # 'Ain': Ain,
            # 'method_deconvolution': method_deconvolution,
            'border_pix': border_pix
        }

        opts = cnmf.params.CNMFParams(params_dict=params_dict)

        cnm1 = cnmf.CNMF(n_process, params=opts, dview=dview)
        cnm1.fit_file(motion_correct=False)

        roi_num = cnm1.estimates.A.shape[1]
        print('saving ...')
        save_f = h5py.File('caiman_segmentation_results.hdf5', 'a')
        save_f.create_dataset('masks',
                              data=np.array(
                                  cnm1.estimates.A.todense()).T.reshape(
                                      (roi_num, resolution[0], resolution[1]),
                                      order='F'),
                              compression='lzf')
        save_f.create_dataset('traces', data=cnm1.estimates.C)
        save_f.close()

        copyfile(
            os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'),
            os.path.join(curr_folder, plane_n,
                         'caiman_segmentation_results.hdf5'))

        # %% STOP CLUSTER and clean up log files
        # cm.stop_server(dview=dview)
        log_files = glob.glob('*_LOG_*')
        for log_file in log_files:
            os.remove(log_file)
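A minimal standalone sketch (not part of the original example) for reading back the results file written above; it assumes the 'masks' and 'traces' dataset names and the per-plane folder layout used in this loop.

import h5py

with h5py.File('caiman_segmentation_results.hdf5', 'r') as f:
    masks = f['masks'][()]    # (n_rois, height, width), Fortran-order reshape of cnm.estimates.A
    traces = f['traces'][()]  # (n_rois, n_frames), cnm.estimates.C
print(masks.shape, traces.shape)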
Example #15
#********************************************************************************************************************************
#********************************************************************************************************************************
#%% obtain initial batch file used for initialization

fname_new = Y[:initbatch].save('demo.mmap', order='C')              # memory map file (not needed)
Yr, dims, T = cm.load_memmap(fname_new)
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Cn_init = cm.local_correlations(np.reshape(Yr, dims + (T,), order='F'))


#%% RUN (offline) CNMF algorithm on the initial batch
pl.close('all')
cnm_init = cnmf.CNMF(2, k=K, gSig=gSig, merge_thresh=merge_thresh,
                     p=p, rf=patch_size//2, stride=stride, skip_refinement=False,
                     normalize_init=False, options_local_NMF=None,
                     minibatch_shape=100, minibatch_suff_stat=5,
                     update_num_comps=True, rval_thr=rval_thr,
                     thresh_fitness_delta=-50, gnb = gnb,
                     thresh_fitness_raw=thresh_fitness_raw,
                     batch_update_suff_stat=True, max_comp_update_shape=max_comp_update_shape)

cnm_init = cnm_init.fit(images)

print(('Number of components:' + str(cnm_init.A.shape[-1])))

if False:
    pl.figure()
    crd = plot_contours(cnm_init.A.tocsc(), Cn_init, thr=0.9)


#%% run (online) OnACID algorithm 
Example #16
Y = np.reshape(Yr, dims + (T, ), order='F')
Cn2 = cm.local_correlations(Y)
pl.imshow(Cn2)
#%% RUN ALGORITHM ON PATCHES
pl.close('all')
cnm_init = cnmf.CNMF(n_processes,
                     method_init='greedy_roi',
                     k=K,
                     gSig=gSig,
                     merge_thresh=merge_thresh,
                     p=0,
                     dview=dview,
                     Ain=None,
                     rf=rf,
                     stride=stride,
                     method_deconvolution='oasis',
                     skip_refinement=False,
                     normalize_init=False,
                     options_local_NMF=None,
                     minibatch_shape=100,
                     minibatch_suff_stat=5,
                     update_num_comps=True,
                     rval_thr=rval_thr,
                     thresh_fitness_delta=thresh_fitness_delta,
                     thresh_fitness_raw=thresh_fitness_raw,
                     batch_update_suff_stat=True,
                     max_comp_update_shape=5)

cnm_init = cnm_init.fit(images)
A_tot = cnm_init.A
C_tot = cnm_init.C
YrA_tot = cnm_init.YrA
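An optional, hypothetical check of the patch initialization above (a sketch; it assumes plot_contours has been imported from caiman.utils.visualization, as in the other examples):

pl.figure()
crd = plot_contours(A_tot.tocsc(), Cn2, thr=0.9)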
Example #17
    params_dict = {
        'fnames': fname_green,
        'fr': fr,
        'decay_time': decay_time,
        'gSig': gSig,
        'p': p,
        'min_SNR': min_SNR,
        'rval_thr': rval_thr,
        'nb': gnb,
        'only_init': False,
        'rf': None
    }

    opts = cnmf.params.CNMFParams(params_dict=params_dict)
    cnm_b = cnmf.CNMF(n_processes, params=opts, dview=dview, Ain=Ain)
    cnm_b.fit_file(motion_correct=False)

    # %% load memory mapped file and evaluate components

    Yr, dims, T = cm.load_memmap(cnm_b.mmap_file)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Cn = cm.local_correlations(images, swap_dim=False)
    cnm_b.estimates.plot_contours(img=Cn)

    # %% evaluate components and do some plotting
    cnm_b.estimates.evaluate_components(images, cnm_b.params, dview=dview)
    cnm_b.estimates.plot_contours(img=Cn, idx=cnm_b.estimates.idx_components)
    cnm_b.estimates.view_components(images,
                                    img=Cn,
                                    idx=cnm_b.estimates.idx_components)
Example #18
#%%
rf = 10  # half-size of the patches in pixels. rf=25, patches are 50x50
stride = 4  # amount of overlap between the patches in pixels
K = 4  # number of neurons expected per patch
gSig = [5, 5]  # expected half size of neurons
merge_thresh = 0.8  # merging threshold, max correlation allowed
p = 2  # order of the autoregressive system
memory_fact = 1  # unitless number accounting for how much memory should be used; try different values, the default is OK for a 16 GB system
save_results = False
#%% RUN ALGORITHM ON PATCHES
cnm = cnmf.CNMF(n_processes,
                k=K,
                gSig=gSig,
                merge_thresh=0.8,
                p=0,
                dview=dview,
                Ain=None,
                rf=rf,
                stride=stride,
                memory_fact=memory_fact,
                method_init='greedy_roi',
                alpha_snmf=10e2)
cnm = cnm.fit(images)

A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
f_tot = cnm.f
sn_tot = cnm.sn

print(('Number of components:' + str(A_tot.shape[-1])))
Example #19
def run(batch_dir: str, UUID: str):
    start_time = time()

    output = {'status': 0, 'output_info': ''}
    n_processes = os.environ['_MESMERIZE_N_THREADS']
    n_processes = int(n_processes)
    file_path = batch_dir + '/' + UUID

    filename = file_path + '.tiff'
    input_params = pickle.load(open(file_path + '.params', 'rb'))

    frate = input_params['frate']
    gSig = input_params['gSig']
    gSiz = 3 * gSig + 1
    min_corr = input_params['min_corr']
    min_pnr = input_params['min_pnr']
    min_SNR = input_params['min_SNR']
    r_values_min = input_params['r_values_min']
    decay_time = input_params['decay_time']
    rf = input_params['rf']
    stride = input_params['stride']
    gnb = input_params['gnb']
    nb_patch = input_params['nb_patch']
    k = input_params['k']
    if 'Ain' in input_params.keys():
        if input_params['Ain']:
            print('>> Ain specified, looking for cnm-A file <<')
            item_uuid = input_params['Ain']
            parent_batch_dir = os.environ['CURR_BATCH_DIR']
            item_out_file = os.path.join(parent_batch_dir, f'{item_uuid}.out')
            t0 = time()
            timeout = 60
            while not os.path.isfile(item_out_file):
                print('>>> cnm-A not found, waiting for 15 seconds <<<')
                sleep(15)
                if time() - t0 > timeout:
                    output.update({'status': 0, 'output_info': 'Timeout exceeded while waiting for Ain input file'})
                    raise TimeoutError('Timeout exceeded while waiting for Ain input file')

            if os.path.isfile(item_out_file):
                if json.load(open(item_out_file, 'r'))['status']:
                    Ain_file = os.path.join(parent_batch_dir, item_uuid + '_cnm-A.pikl')
                    Ain = pickle.load(open(Ain_file, 'rb'))
                    print('>>> Found Ain file <<<')
                else:
                    raise FileNotFoundError('>>> Could not find specified Ain file <<<')
        else:
            Ain = None
    else:
        Ain = None

    if 'method_deconvolution' in input_params.keys():
        method_deconvolution = input_params['method_deconvolution']
    else:
        method_deconvolution = 'oasis'

    if 'deconv_flag' in input_params.keys():
        deconv_flag = input_params['deconv_flag']
    else:
        deconv_flag = True

    filename = [filename]

    print('*********** Creating Process Pool ***********')

    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',  # use this one
                                                     n_processes=n_processes,
                                                     single_thread=False)
    if 'bord_px' in input_params.keys():
        bord_px = input_params['bord_px']
    else:
        bord_px = 6

    try:
        print('Creating memmap')
        fname_new = cm.save_memmap_each(
            filename,
            base_name='memmap_' + UUID,
            order='C',
            border_to_0=bord_px,
            dview=dview)
        fname_new = cm.save_memmap_join(fname_new, base_name='memmap_' + UUID, dview=dview)
        # load memory mappable file
        Yr, dims, T = cm.load_memmap(fname_new)
        Y = Yr.T.reshape((T,) + dims, order='F')
        # compute some summary images (correlation and peak to noise)
        # change swap dim if output looks weird, it is a problem with tiffile
        cn_filter, pnr = cm.summary_images.correlation_pnr(
            Y, gSig=gSig, swap_dim=False)
        if not input_params['do_cnmfe'] and input_params['do_corr_pnr']:
            pickle.dump(cn_filter, open(UUID + '_cn_filter.pikl', 'wb'), protocol=4)
            pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)

            output_file_list = [UUID + '_pnr.pikl',
                                UUID + '_cn_filter.pikl',
                                UUID + '_dims.pikl',
                                UUID + '.out'
                                ]

            output.update({'output': UUID,
                           'status': 1,
                           'output_info': 'inspect correlation & pnr',
                           'output_files': output_file_list
                           })

            dview.terminate()

            for mf in glob(batch_dir + '/memmap_*'):
                os.remove(mf)

            end_time = time()
            processing_time = (end_time - start_time) / 60
            output.update({'processing_time': processing_time})

            json.dump(output, open(file_path + '.out', 'w'))

            return

        cnm = cnmf.CNMF(n_processes=n_processes,
                        method_init='corr_pnr',  # use this for 1 photon
                        k=k,  # neurons per patch
                        gSig=(gSig, gSig),  # half size of neuron
                        gSiz=(gSiz, gSiz),  # in general 3*gSig+1
                        merge_thresh=.3,  # threshold for merging
                        p=1,  # order of autoregressive process to fit
                        dview=dview,  # if None it will run on a single thread
                        # downsampling factor in time for initialization, increase if you have memory problems
                        tsub=2,
                        # downsampling factor in space for initialization, increase if you have memory problems
                        ssub=2,
                        # if you want to initialize with some preselected components you can pass them here as boolean vectors
                        Ain=Ain,
                        # half size of the patch (final patch will be 100x100)
                        rf=(rf, rf),
                        # overlap among patches (keep it at least large as 4 times the neuron size)
                        stride=(stride, stride),
                        only_init_patch=True,  # just leave it as is
                        gnb=gnb,  # number of background components
                        nb_patch=nb_patch,  # number of background components per patch
                        method_deconvolution=method_deconvolution,  # could use 'cvxpy' alternatively
                        deconv_flag=deconv_flag,
                        low_rank_background=True,  # leave as is
                        # sometimes setting to False improve the results
                        update_background_components=True,
                        min_corr=min_corr,  # min peak value from correlation image
                        min_pnr=min_pnr,  # min peak-to-noise ratio from PNR image
                        normalize_init=False,  # just leave as is
                        center_psf=True,  # leave as is for 1 photon
                        del_duplicates=True,  # whether to remove duplicates from initialization
                        border_pix=bord_px)  # number of pixels to not consider in the borders
        cnm.fit(Y)

        #  DISCARD LOW QUALITY COMPONENTS
        idx_components, idx_components_bad, comp_SNR, r_values, pred_CNN = estimate_components_quality_auto(
            Y, cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, frate,
            decay_time, gSig, dims, dview=dview,
            min_SNR=min_SNR, r_values_min=r_values_min, use_cnn=False)

        # np.save(filename[:-5] + '_curves.npy', cnm.C)
        pickle.dump(Yr, open(UUID + '_Yr.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.A, open(UUID + '_cnm-A.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.b, open(UUID + '_cnm-b.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.C, open(UUID + '_cnm-C.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.f, open(UUID + '_cnm-f.pikl', 'wb'), protocol=4)
        pickle.dump(idx_components, open(UUID + '_idx_components.pikl', 'wb'), protocol=4)
        pickle.dump(cnm.YrA, open(UUID + '_cnm-YrA.pikl', 'wb'), protocol=4)
        pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)
        pickle.dump(cn_filter, open(UUID + '_cn_filter.pikl', 'wb'), protocol=4)
        pickle.dump(dims, open(UUID + '_dims.pikl', 'wb'), protocol=4)

        output_file_list = [UUID + '_cnm-A.pikl',
                            UUID + '_Yr.pikl',
                            UUID + '_cnm-b.pikl',
                            UUID + '_cnm-C.pikl',
                            UUID + '_cnm-f.pikl',
                            UUID + '_idx_components.pikl',
                            UUID + '_cnm-YrA.pikl',
                            UUID + '_pnr.pikl',
                            UUID + '_cn_filter.pikl',
                            UUID + '_dims.pikl',
                            UUID + '.out'
                            ]
        output.update({'output': filename[0][:-5],
                       'status': 1,
                       'output_files': output_file_list
                       })

    except Exception as e:
        output.update({'status': 0, 'output_info': traceback.format_exc()})

    dview.terminate()

    for mf in glob(batch_dir + '/memmap_*'):
        os.remove(mf)

    end_time = time()
    processing_time = (end_time - start_time) / 60
    output.update({'processing_time': processing_time})

    json.dump(output, open(file_path + '.out', 'w'))
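A minimal sketch (not part of the original module) for reading back the pickled outputs listed in output_file_list above; the UUID value is a placeholder and the files are assumed to sit in the current working directory.

import pickle

uuid = 'some-finished-item-uuid'  # hypothetical
A = pickle.load(open(uuid + '_cnm-A.pikl', 'rb'))             # spatial components
C = pickle.load(open(uuid + '_cnm-C.pikl', 'rb'))             # temporal components
idx = pickle.load(open(uuid + '_idx_components.pikl', 'rb'))  # accepted components
good_traces = C[idx]                                          # keep only accepted traces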
Example #20
def run_source_extraction(input_file, dview):
    """
    This is the function for source extraction.
    Its goal is to take in a .mmap file,
    perform source extraction on it using cnmf-e and save the cnmf object as a .hdf5 file.
    """

    sql = "SELECT equalization,source_extraction_session_wise,fr,decay_time,min_corr,min_pnr,p,K,gSig,merge_thr,rf,stride,tsub,ssub,p_tsub,p_ssub,low_rank_background,nb,nb_patch,ssub_B,init_iter,ring_size_factor,method_init,method_deconvolution,update_background_components,center_psf,border_pix,normalize_init,del_duplicates,only_init  FROM Analysis WHERE motion_correction_main =?  OR alignment_main = ? OR equalization_main =?"
    val = [input_file, input_file, input_file]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    para = []
    inter = []
    for x in result:
        inter = x
    for y in inter:
        para.append(y)
    gSiz = 4 * para[8] + 1
    parameters = {
        'equalization': para[0],
        'session_wise': para[1],
        'fr': para[2],
        'decay_time': para[3],
        'min_corr': para[4],
        'min_pnr': para[5],
        'p': para[6],
        'K': para[7],
        'gSig': (para[8], para[8]),
        'gSiz': (gSiz, gSiz),
        'merge_thr': para[9],
        'rf': para[10],
        'stride': para[11],
        'tsub': para[12],
        'ssub': para[13],
        'p_tsub': para[14],
        'p_ssub': para[15],
        'low_rank_background': para[16],
        'nb': para[17],
        'nb_patch': para[18],
        'ssub_B': para[19],
        'init_iter': para[20],
        'ring_size_factor': para[21],
        'method_init': para[22],
        'method_deconvolution': para[23],
        'update_background_components': para[24],
        'center_psf': para[25],
        'border_pix': para[26],
        'normalize_init': para[27],
        'del_duplicates': para[28],
        'only_init': para[29]
    }
    # Determine output paths

    if parameters['session_wise']:
        data_dir = os.environ[
            'DATA_DIR_LOCAL'] + 'data/interim/source_extraction/session_wise/'
    else:
        data_dir = os.environ[
            'DATA_DIR_LOCAL'] + 'data/interim/source_extraction/trial_wise/'

    sql1 = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,alignment_v,equalization_v,source_extraction_v,input,home_path,decoding_main FROM Analysis WHERE  motion_correction_main =?  OR alignment_main = ? OR equalization_main =?"
    val1 = [input_file, input_file, input_file]
    cursor.execute(sql1, val1)
    result = cursor.fetchall()
    data = []
    inter = []
    for x in result:
        inter = x
    for y in inter:
        data.append(y)

    # Update the database

    if data[9] == 0:
        data[9] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}"
        output_file_path = data_dir + f'main/{file_name}.hdf5'
        sql1 = "UPDATE Analysis SET source_extraction_main=?,source_extraction_v=? WHERE  motion_correction_main =?  OR alignment_main = ? OR equalization_main =? "
        val1 = [output_file_path, data[9], input_file, input_file, input_file]
        cursor.execute(sql1, val1)

    else:
        data[9] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}"
        output_file_path = data_dir + f'main/{file_name}.hdf5'
        sql2 = "INSERT INTO Analysis (source_extraction_main,source_extraction_v) VALUES (?,?)"
        val2 = [output_file_path, data[10]]
        cursor.execute(sql2, val2)
        database.commit()

    database.commit()

    # Load memmory mappable input file
    if os.path.isfile(input_file):
        Yr, dims, T = cm.load_memmap(input_file)
        images = Yr.T.reshape((T, ) + dims, order='F')
    else:
        logging.warning(' .mmap file does not exist. Cancelling')
        return

    # SOURCE EXTRACTION
    # Check if the summary images are already there
    corr_npy_file_path, pnr_npy_file_path = get_corr_pnr_path(
        gSig_abs=parameters['gSig'][0])

    if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path):
        # Already computed summary images
        logging.info(f' Already computed summary images')
        cn_filter = np.load(corr_npy_file_path)
        pnr = np.load(pnr_npy_file_path)
    else:
        # Compute summary images
        t0 = datetime.datetime.today()
        logging.info(f' Computing summary images')
        cn_filter, pnr = cm.summary_images.correlation_pnr(
            images[::1], gSig=parameters['gSig'][0], swap_dim=False)
        dt = int((datetime.datetime.today() - t0).seconds /
                 60)  # timedelta in minutes
        logging.info(f' Computed summary images. dt = {dt} min')
        # Saving summary images as npy files
        gSig = parameters['gSig'][0]
        corr_npy_file_path = data_dir + f'/meta/corr/{file_name}_gSig_{gSig}.npy'
        pnr_npy_file_path = data_dir + f'/meta/pnr/{file_name}_gSig_{gSig}.npy'
        with open(corr_npy_file_path, 'wb') as f:
            np.save(f, cn_filter)
        with open(pnr_npy_file_path, 'wb') as f:
            np.save(f, pnr)

    # Calculate min, mean, max value for cn_filter and pnr
    corr_min, corr_mean, corr_max = cn_filter.min(), cn_filter.mean(
    ), cn_filter.max()
    pnr_min, pnr_mean, pnr_max = pnr.min(), pnr.mean(), pnr.max()

    # If min_corr and min_pnr are specified via a linear equation, calculate
    # this value
    if type(parameters['min_corr']) == list:
        min_corr = parameters['min_corr'][0] * corr_mean + parameters[
            'min_corr'][1]
        parameters['min_corr'] = min_corr
        logging.info(f' Automatically setting min_corr = {min_corr}')
    if type(parameters['min_pnr']) == list:
        min_pnr = parameters['min_pnr'][0] * pnr_mean + parameters['min_pnr'][1]
        parameters['min_pnr'] = min_pnr
        logging.info(f' Automatically setting min_pnr = {min_pnr}')
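    # (Added note, hypothetical numbers: parameters['min_corr'] = [1.0, 0.1] would
    # resolve to min_corr = 1.0 * corr_mean + 0.1, i.e. a threshold tied to the
    # statistics of this recording rather than a fixed constant.)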

    # Set the parameters for caiman
    opts = params.CNMFParams(params_dict=parameters)

    # SOURCE EXTRACTION
    logging.info(f' Performing source extraction')
    t0 = datetime.datetime.today()
    n_processes = psutil.cpu_count()
    logging.info(f' n_processes: {n_processes}')
    cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, params=opts)
    cnm.fit(images)
    cnm.estimates.dims = dims

    # Calculate the center of masses
    cnm.estimates.center = caiman.base.rois.com(cnm.estimates.A,
                                                images.shape[1],
                                                images.shape[2])

    # Save the cnmf object as a hdf5 file
    logging.info(f' Saving cnmf object')
    cnm.save(output_file_path)
    dt = int(
        (datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    logging.info(f' Source extraction finished. dt = {dt} min')

    sql1 = "UPDATE Analysis SET duration_summary_images=?,source_extraction_corr=?, source_extraction_pnr=?, source_extraction_corr_min =?, source_extraction_corr_mean=?, source_extraction_corr_max=?, source_extraction_pnr_min=?,source_extraction_pnr_mean=?,source_extraction_pnr_max=?,source_extraction_k=?,source_extraction_duration=?,min_corr=?,min_pnr=? WHERE source_extraction_main= ? AND source_extraction_v=? "
    val1 = [
        dt, corr_npy_file_path, pnr_npy_file_path, corr_min, corr_mean,
        corr_max, pnr_min, pnr_mean, pnr_max,
        len(cnm.estimates.C), dt, parameters['min_corr'], parameters['min_pnr'],
        output_file_path, data[9]
    ]
    cursor.execute(sql1, val1)

    return output_file_path, data[9]
Example #21
                                                 single_thread=False)

#%%
cnm = cnmf.CNMF(n_processes=n_processes,
                method_init='corr_pnr',
                k=35,
                gSig=(3, 3),
                gSiz=(10, 10),
                merge_thresh=.8,
                p=1,
                dview=dview,
                tsub=1,
                ssub=1,
                Ain=None,
                rf=(25, 25),
                stride=(25, 25),
                only_init_patch=True,
                gnb=5,
                nb_patch=3,
                method_deconvolution='oasis',
                low_rank_background=False,
                update_background_components=False,
                min_corr=min_corr,
                min_pnr=min_pnr,
                normalize_init=False,
                deconvolve_options_init=None,
                ring_size_factor=1.5,
                center_psf=True)

#%%
# cnm = cnmf.CNMF(n_processes=2, method_init='corr_pnr', k=155, gSig=(3, 3), gSiz=(10, 10), merge_thresh=.8,
Example #22
def run_source_extraction(row, parameters, dview, session_wise = False):
    '''
    This is the function for source extraction.
    Its goal is to take in a .mmap file,
    perform source extraction on it using cnmf-e and save the cnmf object as a .hdf5 file.
    Args:
        row: pd.DataFrame object
            The row corresponding to the analysis state to be source extracted. 
            
    Returns:
        row: pd.DataFrame object
            The row corresponding to the source extracted analysis state.   
    '''
    step_index = 5
    row_local = row.copy()
    row_local.loc['source_extraction_parameters'] = str(parameters)
    row_local = db.set_version_analysis('source_extraction',row_local,session_wise)
    index = row_local.name

    # Determine input path
    if parameters['session_wise']:
        input_mmap_file_path = eval(row_local.loc['alignment_output'])['main']
        if parameters['equalization']:
            input_mmap_file_path =eval(row_local['equalization_output'])['main']
    else: 
        input_mmap_file_path = eval(row_local.loc['motion_correction_output'])['main']
    if not os.path.isfile(input_mmap_file_path):
        logging.error('Input file does not exist. Cancelling.')
        return row_local
    
    # Determine output paths
    file_name = db.create_file_name(step_index, index)
    if parameters['session_wise']:
        data_dir = os.environ['DATA_DIR'] + 'data/interim/source_extraction/session_wise/'
    else:
        data_dir = os.environ['DATA_DIR'] + 'data/interim/source_extraction/trial_wise/'
    output_file_path = data_dir + f'main/{file_name}.hdf5'
   
        
    # Create a dictionary with parameters
    output = {
            'main': output_file_path,
            'meta':{
                'analysis' : {
                        'analyst' : os.environ['ANALYST'],
                        'date' : datetime.datetime.today().strftime("%m-%d-%Y"),
                        'time' : datetime.datetime.today().strftime("%H:%M:%S"),
                        },
                    'duration': {}
                    }
                }
    
    # Load memmory mappable input file
    if os.path.isfile(input_mmap_file_path):
        Yr, dims, T = cm.load_memmap(input_mmap_file_path)
#        logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
        images = Yr.T.reshape((T,) + dims, order='F')
    else:
        logging.warning(f'{index} .mmap file does not exist. Cancelling')
        return row_local
    
    # SOURCE EXTRACTION
    # Check if the summary images are already there
    corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(index, gSig_abs = parameters['gSig'][0])
    
    if corr_npy_file_path != None and os.path.isfile(corr_npy_file_path):  
        # Already computed summary images
        logging.info(f'{index} Already computed summary images')
        cn_filter = np.load(corr_npy_file_path)
        pnr = np.load(pnr_npy_file_path)
    else:
        # Compute summary images
        t0 = datetime.datetime.today()
        logging.info(f'{index} Computing summary images')
        cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig = parameters['gSig'][0], swap_dim=False)
        dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
        output['meta']['duration']['summary_images'] = dt 
        logging.info(f'{index} Computed summary images. dt = {dt} min')
        # Saving summary images as npy files
        gSig = parameters['gSig'][0]
        corr_npy_file_path = data_dir + f'/meta/corr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
        pnr_npy_file_path = data_dir + f'/meta/pnr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
        with open(corr_npy_file_path, 'wb') as f:
            np.save(f, cn_filter)
        with open(pnr_npy_file_path, 'wb') as f:
            np.save(f, pnr)
    
    # Store the paths in the meta dictionary 
    output['meta']['corr'] = {'main': corr_npy_file_path, 'meta': {}}
    output['meta']['pnr'] = {'main': pnr_npy_file_path, 'meta': {}}
    
    # Calculate min, mean, max value for cn_filter and pnr
    corr_min, corr_mean, corr_max = cn_filter.min(), cn_filter.mean(), cn_filter.max()
    output['meta']['corr']['meta'] = {'min': corr_min, 'mean': corr_mean, 'max': corr_max}
    pnr_min, pnr_mean, pnr_max = pnr.min(), pnr.mean(), pnr.max()
    output['meta']['pnr']['meta'] = {'min': pnr_min, 'mean': pnr_mean, 'max': pnr_max}
    
    # If min_corr and min_pnr are specified via a linear equation, calculate 
    # this value 
    if type(parameters['min_corr']) == list:
        min_corr = parameters['min_corr'][0]*corr_mean + parameters['min_corr'][1]
        parameters['min_corr'] = min_corr
        logging.info(f'{index} Automatically setting min_corr = {min_corr}')
    if type(parameters['min_pnr']) == list:
        min_pnr =  parameters['min_pnr'][0]*pnr_mean + parameters['min_pnr'][1]
        parameters['min_pnr'] = min_pnr
        logging.info(f'{index} Automatically setting min_pnr = {min_pnr}')

    # Set the parameters for caiman
    opts = params.CNMFParams(params_dict = parameters)   
    
    # SOURCE EXTRACTION 
    logging.info(f'{index} Performing source extraction')
    t0 = datetime.datetime.today()
    n_processes = psutil.cpu_count()
    logging.info(f'{index} n_processes: {n_processes}')
    cnm = cnmf.CNMF(n_processes = n_processes, dview = dview, params = opts)
    cnm.fit(images)
    cnm.estimates.dims = dims    
    
    # Store the number of neurons
    output['meta']['K'] = len(cnm.estimates.C)
    
    # Calculate the center of masses
    cnm.estimates.center = caiman.base.rois.com(cnm.estimates.A, images.shape[1], images.shape[2])
    
    # Save the cnmf object as a hdf5 file 
    logging.info(f'{index} Saving cnmf object')
    cnm.save(output_file_path)
    dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
    output['meta']['duration']['source_extraction'] = dt
    logging.info(f'{index} Source extraction finished. dt = {dt} min')
    
    # Write necessary variables in row and return
    row_local.loc['source_extraction_parameters'] = str(parameters)
    row_local.loc['source_extraction_output'] = str(output)
        
    return row_local
Example #23
def run(batch_dir: str, UUID: str):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format=
        "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s"
    )

    start_time = time()

    output = {'status': 0, 'output_info': ''}
    n_processes = os.environ['_MESMERIZE_N_THREADS']
    n_processes = int(n_processes)
    file_path = os.path.join(batch_dir, UUID)

    filename = [file_path + '_input.tiff']
    input_params = pickle.load(open(file_path + '.params', 'rb'))

    # If Ain is specified
    if input_params['do_cnmfe']:
        Ain = None
        item_uuid = input_params['cnmfe_kwargs'].pop('Ain')

        if item_uuid:
            print('>> Ain specified, looking for cnm-A file <<')
            parent_batch_dir = os.environ['CURR_BATCH_DIR']
            item_out_file = os.path.join(parent_batch_dir, f'{item_uuid}.out')
            t0 = time()
            timeout = 60
            while not os.path.isfile(item_out_file):
                print('>>> cnm-A not found, waiting for 15 seconds <<<')
                sleep(15)
                if time() - t0 > timeout:
                    output.update({
                        'status': 0,
                        'output_info': 'Timeout exceeded while waiting for Ain input file'
                    })
                    raise TimeoutError(
                        'Timeout exceeded while waiting for Ain input file')

            if os.path.isfile(item_out_file):
                if json.load(open(item_out_file, 'r'))['status']:
                    Ain_path = os.path.join(parent_batch_dir,
                                            item_uuid + '_results.hdf5')
                    Ain = load_dict_from_hdf5(Ain_path)['estimates']['A']
                    print('>>> Found Ain file <<<')
                else:
                    raise FileNotFoundError(
                        '>>> Could not find specified Ain file <<<')

    print('*********** Creating Process Pool ***********')

    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=n_processes,
                                                     single_thread=False
                                                     #ignore_preexisting=True
                                                     )

    try:
        print('Creating memmap')

        # memmap_path = cm.save_memmap_each(
        #     filename,
        #     base_name='memmap-' + UUID,
        #     order='C',
        #     border_to_0=input_params['border_pix'],
        #     dview=dview)
        # memmap_path = cm.save_memmap_join(memmap_path, base_name='memmap-' + UUID, dview=dview)
        # # load memory mappable file
        # Yr, dims, T = cm.load_memmap(memmap_path)
        # Y = Yr.T.reshape((T,) + dims, order='F')

        memmap_path = cm.save_memmap(
            filename,
            base_name='memmap-',
            order='C',
            dview=dview,
            border_to_0=input_params['border_pix'],
        )

        Yr, dims, T = cm.load_memmap(memmap_path)
        Y = np.reshape(Yr.T, [T] + list(dims), order='F')

        if input_params['do_cnmfe']:
            gSig = input_params['cnmfe_kwargs']['gSig'][0]
        else:
            gSig = input_params['corr_pnr_kwargs']['gSig']

        cn_filter, pnr = cm.summary_images.correlation_pnr(Y,
                                                           swap_dim=False,
                                                           gSig=gSig)

        if not input_params['do_cnmfe'] and input_params['do_corr_pnr']:
            pickle.dump(cn_filter,
                        open(UUID + '_cn_filter.pikl', 'wb'),
                        protocol=4)
            pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)

            output_file_list = \
                [
                    UUID + '_pnr.pikl',
                    UUID + '_cn_filter.pikl',
                ]

            output.update({
                'output': UUID,
                'status': 1,
                'output_info': 'inspect correlation & pnr',
                'output_files': output_file_list
            })

            dview.terminate()

            for mf in glob(batch_dir + '/memmap-*'):
                os.remove(mf)

            end_time = time()
            processing_time = (end_time - start_time) / 60
            output.update({'processing_time': processing_time})

            json.dump(output, open(file_path + '.out', 'w'))

            return

        cnm = cnmf.CNMF(
            n_processes=n_processes,
            method_init='corr_pnr',
            dview=dview,
            Ain=Ain,
            only_init_patch=True,  # just leave it as is
            normalize_init=False,
            center_psf=True,
            **input_params['cnmfe_kwargs'],
        )

        cnm.fit(Y)

        #  DISCARD LOW QUALITY COMPONENTS
        cnm.params.set('quality', {
            'use_cnn': False,
            **input_params['eval_kwargs']
        })

        cnm.estimates.evaluate_components(Y, cnm.params, dview=dview)

        out_filename = f'{UUID}_results.hdf5'
        cnm.save(out_filename)

        pickle.dump(pnr, open(UUID + '_pnr.pikl', 'wb'), protocol=4)
        pickle.dump(cn_filter,
                    open(UUID + '_cn_filter.pikl', 'wb'),
                    protocol=4)

        output.update({
            'output': filename[0][:-5],
            'status': 1,
            'output_files': [
                out_filename,
                UUID + '_pnr.pikl',
                UUID + '_cn_filter.pikl',
            ]
        })

    except Exception as e:
        output.update({
            'status': 0,
            'Y.shape': Y.shape,
            'output_info': traceback.format_exc()
        })

    dview.terminate()

    for mf in glob(batch_dir + '/memmap-*'):
        try:
            os.remove(mf)
        except OSError:  # Windows doesn't like removing the memmaps
            pass

    end_time = time()
    processing_time = (end_time - start_time) / 60
    output.update({'processing_time': processing_time})

    json.dump(output, open(file_path + '.out', 'w'))
Example #24
Yr, dims, T = cm.load_memmap(fname_new)
Y = Yr.T.reshape((T, ) + dims, order='F')

#%% run w/o patches
dview, n_processes = None, 2

cnm = cnmf.CNMF(n_processes=n_processes,
                method_init='corr_pnr',
                k=None,
                dview=dview,
                gSig=(gSig, gSig),
                gSiz=(gSiz, gSiz),
                merge_thresh=.65,
                p=1,
                tsub=1,
                ssub=1,
                only_init_patch=True,
                gnb=0,
                min_corr=.7,
                min_pnr=7,
                normalize_init=False,
                ring_size_factor=1.4,
                center_psf=True,
                ssub_B=2,
                init_iter=1,
                s_min=-10)
cnm.fit(Y)

#%% run w/ patches
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
Example #25
K = 14  # number of neurons expected per patch
gSig = [7, 7]  # expected half size of neurons
merge_thresh = 0.8  # merging threshold, max correlation allowed
p = 1  # order of the autoregressive system
memory_fact = 1  # unitless number accounting for how much memory should be used; try different values, the default is OK for a 16 GB system
save_results = False
#%% RUN ALGORITHM ON PATCHES
t1 = time()
cnm = cnmf.CNMF(n_processes,
                k=K,
                gSig=gSig,
                merge_thresh=0.8,
                p=0,
                dview=dview,
                Ain=None,
                rf=rf,
                stride=stride,
                memory_fact=memory_fact,
                method_init=init_method,
                alpha_snmf=alpha_snmf,
                only_init_patch=True,
                gnb=1,
                method_deconvolution='oasis')
cnm = cnm.fit(images)

A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
f_tot = cnm.f
sn_tot = cnm.sn
Example #26
                                        'nb': gnb,
                                        'nb_patch': nb_patch,
                                        'method_deconvolution': 'oasis',       # could use 'cvxpy' alternatively
                                        'low_rank_background': low_rank_background,
                                        'update_background_components': True,  # sometimes setting to False improve the results
                                        'min_corr': min_corr,
                                        'min_pnr': min_pnr,
                                        'normalize_init': False,               # just leave as is
                                        'center_psf': True,                    # leave as is for 1 photon
                                        'ssub_B': ssub_B,
                                        'ring_size_factor': ring_size_factor,
                                        'del_duplicates': True,                # whether to remove duplicates from initialization
                                        'border_pix': 10})                # number of pixels to not consider in the borders


        cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts)
        cnm.fit(images)

        #%% COMPONENT EVALUATION
        # the components are evaluated in two ways:
        #   a) the shape of each component must be correlated with the data
        #   b) a minimum peak SNR is required over the length of a transient
        # Note that here we do not use the CNN based classifier, because it was trained on 2p not 1p data

        min_SNR = 2            # adaptive way to set threshold on the transient size
        r_values_min = 0.75    # threshold on space consistency (if you lower it, more
        #                        components will be accepted, potentially with worse quality)
        cnm.params.set('quality', {'min_SNR': min_SNR,
                                   'rval_thr': r_values_min,
                                   'use_cnn': False})
        cnm.estimates.evaluate_components(images, cnm.params, dview=dview)
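        # A short follow-up sketch (not in the original snippet): after evaluation the
        # accepted / rejected indices are stored on the estimates object, as used in
        # Example #17 above.
        print('accepted components:', len(cnm.estimates.idx_components))
        print('rejected components:', len(cnm.estimates.idx_components_bad))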
Example #27
# pick thresholds
inspect_correlation_pnr(cn_filter,pnr)

#%% start cluster
try:
    dview.terminate()
    dview = None
except:
    pass
c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)
#%%
cnm = cnmf.CNMF(n_processes=n_processes, method_init='corr_pnr', k=10,
                gSig=gSig, gSiz=gSiz, merge_thresh=.8, p=1, dview=dview,
                tsub=1, ssub=1, Ain=None, rf=(50, 50), stride=(32, 32), only_init_patch=True,
                gnb=16, nb_patch=16, method_deconvolution='oasis', low_rank_background=True,
                update_background_components=True, min_corr=min_corr, min_pnr=min_pnr,
                normalize_init=False, deconvolve_options_init=None,
                ring_size_factor=1.5, center_psf=center_psf, del_duplicates=True)

cnm.fit(Y)
# %% DISCARD LOW QUALITY COMPONENTS
final_frate = 10
r_values_min = 0.9  # threshold on space consistency
fitness_min = -100 # threshold on time variability
# threshold on time variability (if nonsparse activity)
fitness_delta_min = -100
Npeaks = 5
traces = cnm.C + cnm.YrA
# TODO: todocument
idx_components, idx_components_bad = cm.components_evaluation.estimate_components_quality(
Example #28
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Y = np.reshape(Yr, dims + (T, ), order='F')

#%%
Cn = cnmf.utilities.local_correlations(Y[:, :, :3000])
pl.imshow(Cn, cmap='gray')

#%%
if not is_patches:
    #%%

    K = 35  # number of neurons expected per patch
    gSig = [7, 7]  # expected half size of neurons
    merge_thresh = 0.8  # merging threshold, max correlation allowed
    p = 2  #order of the autoregressive system
    cnm = cnmf.CNMF(n_processes, method_init=init_method, k=K, gSig=gSig,
                    merge_thresh=merge_thresh, p=p, dview=dview, Ain=None)
    cnm = cnm.fit(images)

#%%
else:
    #%%
    rf = 15  # half-size of the patches in pixels. rf=25, patches are 50x50
    stride = 4  # amount of overlap between the patches in pixels
    K = 6  # number of neurons expected per patch
    gSig = [7, 7]  # expected half size of neurons
    merge_thresh = 0.8  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    memory_fact = 1  # unitless number accounting for how much memory should be used; try different values, the default is OK for a 16 GB system
    save_results = False
    #%% RUN ALGORITHM ON PATCHES
Example #29
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% load data

    fname = os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    Y = cm.load(fname).astype(np.float32)  #
    # used as a background image
    Cn = cm.local_correlations(Y.transpose(1, 2, 0))
    #%% set up some parameters

    # frame rate (Hz)
    fr = 10
    # approximate length of transient event in seconds
    decay_time = 0.5
    # expected half size of neurons
    gSig = [6, 6]
    # order of AR indicator dynamics
    p = 1
    # minimum SNR for accepting new components
    min_SNR = 3.5
    # correlation threshold for new component inclusion
    rval_thr = 0.90
    # number of background components
    gnb = 3

    # set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)

    # number of shapes to be updated each time (put this to a finite small value to increase speed)
    max_comp_update_shape = np.inf
    # maximum number of expected components used for memory pre-allocation (exaggerate here)
    expected_comps = 50
    # number of timesteps to consider when testing new neuron candidates
    N_samples = np.ceil(fr * decay_time)
    # exceptionality threshold
    thresh_fitness_raw = log_ndtr(-min_SNR) * N_samples
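    # (Added note with hypothetical numbers: for min_SNR = 3.5 and
    # N_samples = ceil(10 * 0.5) = 5, log_ndtr(-3.5) is roughly -8.4,
    # so thresh_fitness_raw is roughly -42.)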
    # total length of file
    T1 = Y.shape[0]

    # set up CNMF initialization parameters

    # merging threshold, max correlation allowed
    merge_thresh = 0.8
    # number of frames for initialization (presumably from the first file)
    initbatch = 400
    # size of patch
    patch_size = 32
    # amount of overlap between patches
    stride = 3
    # max number of components in each patch
    K = 4

    #%% obtain initial batch file used for initialization
    # memory map file (not needed)
    fname_new = Y[:initbatch].save(os.path.join(caiman_datadir(),
                                                'example_movies', 'demo.mmap'),
                                   order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Cn_init = cm.local_correlations(np.reshape(Yr, dims + (T, ), order='F'))

    #%% RUN (offline) CNMF algorithm on the initial batch
    pl.close('all')
    cnm_init = cnmf.CNMF(2,
                         k=K,
                         gSig=gSig,
                         merge_thresh=merge_thresh,
                         fr=fr,
                         p=p,
                         rf=patch_size // 2,
                         stride=stride,
                         skip_refinement=False,
                         normalize_init=False,
                         options_local_NMF=None,
                         minibatch_shape=100,
                         minibatch_suff_stat=5,
                         update_num_comps=True,
                         rval_thr=rval_thr,
                         thresh_fitness_delta=-50,
                         gnb=gnb,
                         decay_time=decay_time,
                         thresh_fitness_raw=thresh_fitness_raw,
                         batch_update_suff_stat=False,
                         max_comp_update_shape=max_comp_update_shape,
                         expected_comps=expected_comps,
                         dview=None,
                         min_SNR=min_SNR)

    cnm_init = cnm_init.fit(images)

    print(('Number of components:' + str(cnm_init.estimates.A.shape[-1])))

    pl.figure()
    crd = plot_contours(cnm_init.estimates.A.tocsc(), Cn_init, thr=0.9)

    #%% run (online) OnACID algorithm

    cnm = deepcopy(cnm_init)
    cnm.params.data['dims'] = (60, 80)
    cnm._prepare_object(np.asarray(Yr), T1)

    t = initbatch

    Y_ = cm.load(fname)[initbatch:].astype(np.float32)
    for frame_count, frame in enumerate(Y_):
        cnm.fit_next(t, frame.copy().reshape(-1, order='F'))
        t += 1

#%% extract the results

    C, f = cnm.estimates.C_on[gnb:cnm.M], cnm.estimates.C_on[:gnb]
    A, b = cnm.estimates.Ab[:, gnb:cnm.M], cnm.estimates.Ab[:, :gnb]
    print(('Number of components:' + str(A.shape[-1])))

    #%% pass through the CNN classifier with a low threshold (keeps clearer neuron shapes and excludes processes)
    use_CNN = True
    if use_CNN:
        # threshold for CNN classifier
        thresh_cnn = 0.1
        from caiman.components_evaluation import evaluate_components_CNN
        predictions, final_crops = evaluate_components_CNN(
            A,
            dims,
            gSig,
            model_name=os.path.join(caiman_datadir(), 'model', 'cnn_model'))
        A_exclude, C_exclude = A[:, predictions[:, 1] < thresh_cnn], C[predictions[:, 1] < thresh_cnn]
        A, C = A[:, predictions[:, 1] >= thresh_cnn], C[predictions[:, 1] >= thresh_cnn]
        noisyC = cnm.estimates.noisyC[gnb:cnm.M]
        YrA = noisyC[predictions[:, 1] >= thresh_cnn] - C
    else:
        YrA = cnm.estimates.noisyC[gnb:cnm.M] - C

#%% plot results
    pl.figure()
    crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)

    view_patches_bar(Yr, A, C, b, f, dims[0], dims[1], YrA, img=Cn)
Example #30
def run_cnmfe(
    file_path: str,
    n_cpus: int = 1,
    mc_settings: dict = {},
    cnmf_settings: dict = {},
    qc_settings: dict = {},
):
    caiman_path = os.path.abspath(cm.__file__)
    print(f'caiman location: {caiman_path}')
    sys.stdout.flush()

    # load and update the pipeline settings
    mc_parameters = DEFAULT_MCORR_SETTINGS
    for k, v in mc_settings.items():
        mc_parameters[k] = v
    cnmf_parameters = DEFAULT_CNMF_PARAMETERS
    for k, v in cnmf_settings.items():
        cnmf_parameters[k] = v
    qc_parameters = DEFAULT_QC_PARAMETERS
    for k, v in qc_settings.items():
        qc_parameters[k] = v

    opts = params.CNMFParams(params_dict=mc_parameters)
    opts.change_params(params_dict=cnmf_parameters)
    opts.change_params(params_dict=qc_parameters)

    # add the image files to the data param
    opts.set('data', {'fnames': [file_path]})

    if n_cpus > 1:
        print("starting server")
        n_proc = np.max([(n_cpus - 1), 1])
        c, dview, n_processes = cm.cluster.setup_cluster(backend="local",
                                                         n_processes=n_proc,
                                                         single_thread=False)

        print(n_processes)
        sleep(30)
    else:
        print('no multiprocessing')
        dview = None
        n_processes = 1

    print('starting cnmfe')
    sys.stdout.flush()

    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm.fit_file(motion_correct=False)

    print("saving results")
    cnm.save(cnm.mmap_file[:-4] + "hdf5")

    # save the parameters in the same dir as the results
    final_params = cnm.params.to_dict()
    path_base = os.path.dirname(cnm.mmap_file)
    params_file = os.path.join(path_base, "all_caiman_parameters.pkl")
    with open(params_file, "wb") as fp:
        pickle.dump(final_params, fp)

    print("stopping server")
    cm.stop_server(dview=dview)
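A hypothetical invocation of the pipeline above (the path and the settings dicts are placeholders, not values from the original source):

if __name__ == '__main__':
    run_cnmfe(
        file_path='/path/to/movie.tif',
        n_cpus=4,
        cnmf_settings={'gSig': (3, 3), 'gSiz': (13, 13)},
    )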