        Cn = np.array(
            cm.load('/'.join(params_movie['gtname'].split('/')[:-2] +
                             ['projections', 'correlation_image.tif']))).squeeze()

        check_nan = False
        # %% start cluster
        # TODO: show screenshot 10
        try:
            cm.stop_server()
            dview.terminate()
        except Exception:
            print('No clusters to stop')

        c, dview, n_processes = setup_cluster(backend=backend_patch,
                                              n_processes=n_processes,
                                              single_thread=False)
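        # setup_cluster returns the cluster client (c), a "dview" handle that
        # CaImAn routines use to distribute work across workers, and the
        # number of worker processes actually started.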
        # %%
        params_dict = {
            'fnames': [fname_new],
            'fr': params_movie['fr'],
            'decay_time': params_movie['decay_time'],
            'rf': params_movie['rf'],
            'stride': params_movie['stride_cnmf'],
            'K': params_movie['K'],
            'gSig': params_movie['gSig'],
            'merge_thr': params_movie['merge_thresh'],
            'p': global_params['p'],
            'nb': global_params['gnb'],
            'only_init': global_params['only_init_patch'],
            'dview': dview,
        }
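
        # The usual next step (not shown in this truncated excerpt) wraps the
        # dictionary in a CNMFParams object that is handed to CNMF, e.g.:
        # opts = params.CNMFParams(params_dict=params_dict)
        # cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)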

# %% Example #2

from ipyparallel import Client
# mpl.use('Qt5Agg')
import pylab as pl
pl.ion()
#%%

import caiman as cm
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.source_extraction.cnmf.utilities import extract_DF_F
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.utils.utils import download_demo
from caiman.cluster import setup_cluster
# %% RUN ANALYSIS
c, dview, n_processes = setup_cluster(backend='local',
                                      n_processes=None,
                                      single_thread=False)
#%%
is_patches = True
is_dendrites = False

if is_dendrites:
    # THIS METHOD CAN GIVE POSSIBLY INCONSISTENT RESULTS ON SOMAS WHEN NOT USED WITH PATCHES
    init_method = 'sparse_nmf'
    alpha_snmf = 10e1  # this controls sparsity
else:
    init_method = 'greedy_roi'
    alpha_snmf = None  #10e2  # this controls sparsity

#%% FOR LOADING ALL TIFF FILES IN A FOLDER AND SAVING THEM ON A SINGLE MEMORY-MAPPABLE FILE
fnames = ['demoMovieJ.tif']
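
# A minimal sketch of the memory-mapping step the header above refers to,
# following the standard CaImAn demo (this continuation is assumed, not part
# of the excerpt):
# fname_new = cm.save_memmap(fnames, base_name='Yr', order='C', dview=dview)
# Yr, dims, T = cm.load_memmap(fname_new)
# images = np.reshape(Yr.T, [T] + list(dims), order='F')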

# %% Example #3

results = []
# %%
for params_movie in np.array(params_movies)[ID]:
    # %% start cluster
    #
    # TODO: show screenshot 10
    fname_new = os.path.join(base_folder, params_movie['fname'])
    if generate_data:
        try:
            cm.stop_server()
            dview.terminate()
        except Exception:
            print('No clusters to stop')

        c, dview, n_processes = setup_cluster(backend='multiprocessing',
                                              n_processes=20,
                                              single_thread=False)

        # %% prepare ground truth masks

        gt_file = os.path.join(
            os.path.split(fname_new)[0],
            os.path.split(fname_new)[1][:-4] + 'match_masks.npz')
        gSig = params_movie['gSig']
        with np.load(gt_file, encoding='latin1') as ld:
            Cn_orig = ld['Cn']
            dims = (ld['d1'][()], ld['d2'][()])
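            # Estimates bundles the ground-truth spatial footprints (A), the
            # temporal traces (C) and the background components (b, f).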
            gt_estimate = Estimates(A=scipy.sparse.csc_matrix(ld['A_gt'][()]),
                                    b=ld['b_gt'],
                                    C=ld['C_gt'],
                                    f=ld['f_gt'])

# %% Example #4
import json
import logging
import os
import pickle
import shutil
import sys
import traceback
from glob import glob
from time import time

import numpy as np

import caiman as cm
from caiman.cluster import setup_cluster
from caiman.source_extraction.cnmf import cnmf

def run(work_dir: str, UUID: str, save_temp_files: str):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s"
    )

    start_time = time()

    batch_dir = os.environ['CURR_BATCH_DIR']
    save_temp_files = bool(int(save_temp_files))

    output = {'status': 0, 'output_info': ''}
    n_processes = int(os.environ['_MESMERIZE_N_THREADS'])

    filepath = os.path.join(work_dir, UUID)

    imgpath = f'{filepath}_input.tiff'
    input_params = pickle.load(open(f'{filepath}.params', 'rb'))

    print('******** Creating process pool *********')
    c, dview, n_processes = setup_cluster(backend='local',
                                          n_processes=n_processes,
                                          single_thread=False,
                                          ignore_preexisting=True)
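    # ignore_preexisting=True lets setup_cluster proceed even if another
    # CaImAn cluster already appears to be running, which can happen inside a
    # batch manager that keeps its own process pools.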

    try:
        if input_params['use_memmap']:
            memmap_uuid = input_params['memmap_uuid']

            memmap_batchdir = glob(
                os.path.join(batch_dir, f'memmap-{memmap_uuid}*.mmap'))

            # Check batch dir
            if len(memmap_batchdir) > 0:
                memmap_path = memmap_batchdir[0]
                print(
                    f'********** Found existing memmap in batch dir: {memmap_path} ********** '
                )

                # copy to work dir
                if not os.path.samefile(batch_dir, work_dir):
                    print('**** Copying memmap to work dir ****')
                    shutil.copy(memmap_path, work_dir)
                    memmap_path = glob(
                        os.path.join(work_dir,
                                     f'memmap-{memmap_uuid}*.mmap'))[0]

            else:
                # remake the memmap with the same UUID so that future batch items can rely on it
                print(
                    '********** Memmap not found, re-making memmap with the same UUID **********'
                )
                memmap_path = cm.save_memmap([imgpath],
                                             base_name=f'memmap-{memmap_uuid}',
                                             is_3D=True,
                                             order='C',
                                             dview=dview)

        else:
            print('********** Making memmap **********')
            memmap_path = cm.save_memmap([imgpath],
                                         base_name=f'memmap-{UUID}',
                                         is_3D=True,
                                         order='C',
                                         dview=dview)

        print(f'Using memmap:\n{memmap_path}')

        print('********** Loading memmap **********')
        Yr, dims, T = cm.load_memmap(memmap_path)
        Y = np.reshape(Yr, dims + (T, ), order='F')

        images = np.reshape(Yr.T, [T] + list(dims), order='F')
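        # Yr is stored as pixels x time in Fortran order; transposing and
        # reshaping yields the (T, d1, d2[, d3]) array that CNMF.fit expects.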

        if input_params['use_patches']:
            cnm = cnmf.CNMF(n_processes=n_processes,
                            dview=dview,
                            only_init_patch=True,
                            **input_params['cnmf_kwargs'])

        else:
            cnm = cnmf.CNMF(n_processes,
                            dview=dview,
                            **input_params['cnmf_kwargs'])

        cnm.fit(images)

        print('Number of components: ' + str(cnm.estimates.A.shape[-1]))

        cnm.params.change_params(
            params_dict={
                **input_params['eval_kwargs'], 'use_cnn': False
            })

        cnm.estimates.evaluate_components(images, cnm.params, dview=dview)
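        # evaluate_components scores each component (trace SNR, spatial
        # correlation with the raw data; the CNN test is disabled above) and
        # fills estimates.idx_components / idx_components_bad accordingly.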

        if input_params['refit']:
            cnm.params.set('temporal', {'p': input_params['cnmf_kwargs']['p']})
            cnm_ = cnm.refit(images)
        else:
            cnm_ = cnm

        out_filename = f'{UUID}_results.hdf5'
        cnm_.save(out_filename)

        output_files = [out_filename]

        # Save the memmap
        if save_temp_files:
            print("***** Keeping memmap file *****")

            # copy to batch dir if batch_dir != work_dir
            if not os.path.samefile(batch_dir, work_dir):
                print("***** Copying memmap file to batch dir *****")
                shutil.copy(memmap_path, batch_dir)

        # Delete the memmap from the work dir
        if not os.path.samefile(batch_dir, work_dir):
            print("***** Deleting memmap files from work dir *****")
            try:
                os.remove(memmap_path)
            except OSError:
                pass

        output.update({
            'output': UUID,
            'status': 1,
            'output_files': output_files,
            'saved_memmap': save_temp_files
        })

    except Exception:
        output.update({'status': 0, 'output_info': traceback.format_exc()})

    cm.stop_server(dview=dview)

    end_time = time()
    processing_time = (end_time - start_time) / 60
    output.update({'processing_time': processing_time})

    with open(filepath + '.out', 'w') as f:
        json.dump(output, f)
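
# A hypothetical command-line entry point (the argv layout is an assumption,
# not part of the original excerpt):
# if __name__ == '__main__':
#     run(work_dir=sys.argv[1], UUID=sys.argv[2], save_temp_files=sys.argv[3])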

# %% Example #5
import scipy
from ipyparallel import Client
# mpl.use('Qt5Agg')
import pylab as pl
pl.ion()
#%%

import caiman as cm
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.source_extraction.cnmf.utilities import extract_DF_F
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.utils.utils import download_demo
from caiman.cluster import setup_cluster
# %% RUN ANALYSIS
c, dview, n_processes = setup_cluster(
    backend='local', n_processes=None, single_thread=False)
#%%
is_patches = True
is_dendrites = False

if is_dendrites:
    # THIS METHOD CAN GIVE POSSIBLY INCONSISTENT RESULTS ON SOMAS WHEN NOT USED WITH PATCHES
    init_method = 'sparse_nmf'
    alpha_snmf = 10e1  # this controls sparsity
else:
    init_method = 'greedy_roi'
    alpha_snmf = None  # 10e2  # this controls sparsity

#%% FOR LOADING ALL TIFF FILES IN A FOLDER AND SAVING THEM ON A SINGLE MEMORY-MAPPABLE FILE
fnames = ['demoMovieJ.tif']
base_folder = './example_movies/'  # folder containing the demo files

# %% Example #6
        strides = parameters['strides']        # start a new patch for pw-rigid motion correction every x pixels
        overlaps = parameters['overlaps']      # overlap between patches; patch size = strides + overlaps
        splits_els = parameters['splits_els']  # for parallelisation, split movies into num_splits chunks across time
        # Tip: make sure len(movie)/num_splits > 100

        upsample_factor_grid = parameters['upsample_factor_grid']  # upsample factor to avoid smearing when merging patches
        max_deviation_rigid = parameters['max_deviation_rigid']    # maximum deviation allowed for a patch w.r.t. rigid shifts

        c, dview, n_processes = setup_cluster(
            backend=args.backend,            # cluster backend to use
            n_processes=args.num_processes,  # number of processes; if you run out of memory, try reducing this
        )

        if path.exists(path.splitext(filename)[0] + '_mc_template.npy'):
            mc_template = np.load(path.splitext(filename)[0] + '_mc_template.npy')
            bord_px = cell_info[animal][session]['motion_correction']['bord_moco']
        else:
            # Rigid motion correction to obtain a template
            mc = motion_correct_oneP_rigid(fnames,
                                           gSig_filt=gSig_filt,
                                           max_shifts=max_shifts,
                                           splits_rig=splits_rig)