Example #1
def test_computational_performance(fnames, path_ROIs, n_processes):
    import os
    import cv2
    import glob
    import logging
    import matplotlib.pyplot as plt
    import numpy as np
    import tensorflow as tf
    import h5py
    from time import time

    try:
        cv2.setNumThreads(0)
    except Exception:
        pass

    try:
        if __IPYTHON__:
            # this is used for debugging purposes only. allows to reload classes
            # when changed
            get_ipython().magic('load_ext autoreload')
            get_ipython().magic('autoreload 2')
    except NameError:
        pass

    import caiman as cm
    from caiman.motion_correction import MotionCorrect
    from caiman.utils.utils import download_demo, download_model
    from caiman.source_extraction.volpy.volparams import volparams
    from caiman.source_extraction.volpy.volpy import VOLPY
    from caiman.source_extraction.volpy.mrcnn import visualize, neurons
    import caiman.source_extraction.volpy.mrcnn.model as modellib
    from caiman.paths import caiman_datadir
    from caiman.summary_images import local_correlations_movie_offline
    from caiman.summary_images import mean_image
    from caiman.source_extraction.volpy.utils import quick_annotation
    from multiprocessing import Pool

    time_start = time()
    print('Start MOTION CORRECTION')

    # %%  Load demo movie and ROIs
    # fnames and path_ROIs come directly from the function arguments

    #%% dataset dependent parameters
    fr = 400  # sample rate of the movie

    # motion correction parameters
    pw_rigid = False  # flag for pw-rigid motion correction
    gSig_filt = (3, 3)  # size of filter, in general same as gSig (see below);
    # change this one if the algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (patch size is strides + overlaps)
    max_deviation_rigid = 3  # maximum deviation allowed for patch with respect to rigid shifts
    border_nan = 'copy'

    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = volparams(params_dict=opts_dict)

    # %% start a cluster for parallel processing
    dview = Pool(n_processes)
    #dview = None
    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # Run correction
    mc.motion_correct(save_movie=True)

    time_mc = time() - time_start
    print(time_mc)
    print('START MEMORY MAPPING')

    # %% restart cluster to clean up memory
    dview.terminate()
    dview = Pool(n_processes)

    # %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap_join(mc.mmap_file,
                                    base_name='memmap_',
                                    add_to_mov=border_to_0,
                                    dview=dview,
                                    n_chunks=1000)  # exclude border

    time_mmap = time() - time_start - time_mc
    print('Start Segmentation')
    # %% SEGMENTATION
    # create summary images
    img = mean_image(mc.mmap_file[0], window=1000, dview=dview)
    img = (img - np.mean(img)) / np.std(img)
    Cn = local_correlations_movie_offline(mc.mmap_file[0],
                                          fr=fr,
                                          window=1500,
                                          stride=1500,
                                          winSize_baseline=400,
                                          remove_baseline=True,
                                          dview=dview).max(axis=0)
    img_corr = (Cn - np.mean(Cn)) / np.std(Cn)
    summary_image = np.stack([img, img, img_corr], axis=2).astype(np.float32)
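    # 3-channel float32 summary image (mean, mean, local correlation), the
    # format consumed by the Mask R-CNN branch below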

    #%% three methods for segmentation
    methods_list = [
        'manual_annotation',  # requires a user-prepared annotated dataset in the same format as the demo ROIs
        'quick_annotation',   # annotates the data through a simple interface in Python
        'maskrcnn'            # a convolutional network trained to find neurons in summary images
    ]
    method = methods_list[0]
    if method == 'manual_annotation':
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]  # load ROIs

    elif method == 'quick_annotation':
        ROIs = quick_annotation(img_corr, min_radius=4, max_radius=10)

    elif method == 'maskrcnn':
        config = neurons.NeuronsConfig()

        class InferenceConfig(config.__class__):
            # Run detection on one image at a time
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0.7
            IMAGE_RESIZE_MODE = "pad64"
            IMAGE_MAX_DIM = 512
            RPN_NMS_THRESHOLD = 0.7
            POST_NMS_ROIS_INFERENCE = 1000

        config = InferenceConfig()
        config.display()
        model_dir = os.path.join(caiman_datadir(), 'model')
        DEVICE = "/cpu:0"  # /cpu:0 or /gpu:0
        with tf.device(DEVICE):
            model = modellib.MaskRCNN(mode="inference",
                                      model_dir=model_dir,
                                      config=config)
        weights_path = download_model('mask_rcnn')
        model.load_weights(weights_path, by_name=True)
        results = model.detect([summary_image], verbose=1)
        r = results[0]
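        # r['masks'] has shape (H, W, n_masks); transpose to (n_masks, H, W)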
        ROIs = r['masks'].transpose([2, 0, 1])

        display_result = False
        if display_result:
            _, ax = plt.subplots(1, 1, figsize=(16, 16))
            visualize.display_instances(summary_image,
                                        r['rois'],
                                        r['masks'],
                                        r['class_ids'], ['BG', 'neurons'],
                                        r['scores'],
                                        ax=ax,
                                        title="Predictions")

    time_seg = time() - time_mmap - time_mc - time_start
    print('Start SPIKE EXTRACTION')

    # %% restart cluster to clean up memory
    dview.terminate()
    dview = Pool(n_processes, maxtasksperchild=1)

    # %% parameters for trace denoising and spike extraction
    fnames = fname_new  # switch to the memory-mapped file
    # ROIs: regions of interest from the segmentation step above
    index = list(range(len(ROIs)))  # indices of neurons
    weights = None  # set to reuse spatial weights from a previous run

    tau_lp = 5  # parameter for the high-pass filter that removes photobleaching
    threshold = 4  # threshold for finding spikes; increase it to find fewer spikes
    contextSize = 35  # number of pixels surrounding the ROI to censor from the background PCA
    flip_signal = True  # important! whether to flip the signal: True for the Voltron indicator, False for others

    opts_dict = {
        'fnames': fnames,
        'ROIs': ROIs,
        'index': index,
        'weights': weights,
        'tau_lp': tau_lp,
        'threshold': threshold,
        'contextSize': contextSize,
        'flip_signal': flip_signal
    }

    opts.change_params(params_dict=opts_dict)

    #%% Trace Denoising and Spike Extraction
    vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
    vpy.fit(n_processes=n_processes, dview=dview)

    # %% STOP CLUSTER and clean up log files
    #dview.terminate()
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    time_ext = time() - time_mmap - time_mc - time_start - time_seg

    #%%
    print('file: ' + fnames)
    print('number of processes: ' + str(n_processes))
    print(time_mc)
    print(time_mmap)
    print(time_seg)
    print(time_ext)
    time_list = [time_mc, time_mmap, time_seg, time_ext]

    return time_list
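A minimal usage sketch for the benchmark above, assuming the VolPy demo files (the same `download_demo` calls appear in the later examples) and an 8-process pool; the returned list holds wall-clock times for motion correction, memory mapping, segmentation, and spike extraction:

from caiman.utils.utils import download_demo

fnames = download_demo('demo_voltage_imaging.hdf5', 'volpy')
path_ROIs = download_demo('demo_voltage_imaging_ROIs.hdf5', 'volpy')
time_mc, time_mmap, time_seg, time_ext = test_computational_performance(
    fnames, path_ROIs, n_processes=8)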
Example #2
def main():
    pass  # For compatibility between running under Spyder and the CLI

#%% Select file(s) to be processed (download if not present)
    fnames = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
        fnames = [download_demo(fnames[0])]

#%% First setup some parameters for data and motion correction

    # dataset dependent parameters
    fr = 30             # imaging rate in frames per second
    decay_time = 0.4    # length of a typical transient in seconds
    dxy = (2., 2.)      # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)       # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um

    # motion correction parameters
    pw_rigid = True       # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
    # start a new patch for pw-rigid motion correction every x pixels
    strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
    # overlap between patches (patch size in pixels: strides + overlaps)
    overlaps = (24, 24)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'copy'
    }

    opts = params.CNMFParams(params_dict=mc_dict)

# %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the video press q
    display_images = True

    if display_images:
        m_orig = cm.load_movie_chain(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

# %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # note that the file is not loaded in memory

# %% Run (piecewise-rigid motion) correction using NoRMCorre
    mc.motion_correct(save_movie=True)

# %% compare with original movie
    if display_images:
        m_orig = cm.load_movie_chain(fnames)
        m_els = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie,
                                      m_els.resize(1, 1, ds_ratio)], axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit

# %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file, base_name='memmap_', order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

# %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%  parameters for source extraction and deconvolution
    p = 1                    # order of the autoregressive system
    gnb = 2                  # number of global background components
    merge_thr = 0.85         # merging threshold, max correlation allowed
    rf = 15                  # half-size of the patches in pixels (e.g., rf=25 gives 50x50 patches)
    stride_cnmf = 6          # amount of overlap between the patches in pixels
    K = 4                    # number of components per patch
    gSig = [4, 4]            # expected half size of neurons in pixels
    # initialization method (use 'sparse_nmf' if analyzing dendritic data)
    method_init = 'greedy_roi'
    ssub = 2                     # spatial subsampling during initialization
    tsub = 2                     # temporal subsampling during initialization

    # parameters for component evaluation
    opts_dict = {'fnames': fnames,
                 'p': p,
                 'fr': fr,
                 'nb': gnb,
                 'rf': rf,
                 'K': K,
                 'gSig': gSig,
                 'stride': stride_cnmf,
                 'method_init': method_init,
                 'rolling_sum': True,
                 'merge_thr': merge_thr,
                 'n_processes': n_processes,
                 'only_init': True,
                 'ssub': ssub,
                 'tsub': tsub}

    opts.change_params(params_dict=opts_dict)
# %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0). If you want to have
    # deconvolution within each patch change params.patch['p_patch'] to a
    # nonzero value

    #opts.change_params({'p': 0})
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

# %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)

# %% plot contours of found components
    Cns = local_correlations_movie_offline(mc.mmap_file[0],
                                           remove_baseline=True, window=1000, stride=1000,
                                           winSize_baseline=100, quantil_min_baseline=10,
                                           dview=dview)
    Cn = Cns.max(axis=0)
    Cn[np.isnan(Cn)] = 0
    cnm.estimates.plot_contours(img=Cn)
    plt.title('Contour plots of found components')
#%% save results
    cnm.estimates.Cn = Cn
    cnm.save(fname_new[:-5]+'_init.hdf5')

# %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm2 = cnm.refit(images, dview=dview)
    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2  # signal to noise ratio for accepting a component
    rval_thr = 0.85  # space correlation threshold for accepting a component
    cnn_thr = 0.99  # threshold for CNN based classifier
    cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected

    cnm2.params.set('quality', {'decay_time': decay_time,
                               'min_SNR': min_SNR,
                               'rval_thr': rval_thr,
                               'use_cnn': True,
                               'min_cnn_thr': cnn_thr,
                               'cnn_lowest': cnn_lowest})
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)
    # %% PLOT COMPONENTS
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)

    # %% VIEW TRACES (accepted and rejected)

    if display_images:
        cnm2.estimates.view_components(images, img=Cn,
                                      idx=cnm2.estimates.idx_components)
        cnm2.estimates.view_components(images, img=Cn,
                                      idx=cnm2.estimates.idx_components_bad)
    #%% update object with selected components
    cnm2.estimates.select_components(use_object=True)
    #%% Extract DF/F values
    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)

    #%% Show final traces
    cnm2.estimates.view_components(img=Cn)
    #%%
    cnm2.estimates.Cn = Cn
    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5')
    #%% reconstruct denoised movie (press q to exit)
    if display_images:
        cnm2.estimates.play_movie(images, q_max=99.9, gain_res=2,
                                  magnification=2,
                                  bpx=border_to_0,
                                  include_bck=False)  # background not shown

    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
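The demo above references several names it never imports. A plausible header, modeled on the imports in Example #1 (the `cnmf`/`params` import path is an assumption based on the standard CaImAn demos), plus the usual entry point:

import glob
import os

import matplotlib.pyplot as plt
import numpy as np

import caiman as cm
from caiman.motion_correction import MotionCorrect
from caiman.source_extraction.cnmf import cnmf, params
from caiman.summary_images import local_correlations_movie_offline
from caiman.utils.utils import download_demo

if __name__ == '__main__':
    main()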
Example #3
m1 = cm.load('403106_3min_rois_mc_lp.hdf5') 
#%%#
mcr = cm.load('/home/andrea/NEL-LAB Dropbox/Andrea Giovannucci/Kaspar-Andrea/exampledata/Other/403106_3min_rois_mc_lp_crop.hdf5')
#%%
mcr = cm.load('/home/andrea/NEL-LAB Dropbox/Andrea Giovannucci/Kaspar-Andrea/exampledata/Other/403106_3min_rois_mc_lp_crop_2.hdf5')
#%%
fname = '/home/nel/data/voltage_data/Marton/454597/Cell_0/40x_patch1/movie/40x_patch1_000_mc_small.hdf5'

#%%
mcr = cm.load(fname)

ycr = mcr.to_2D() 

ycr = ycr - ycr.min()

mcr_lc = local_correlations_movie_offline(fname)
ycr_lc = mcr_lc.to_2D()
#%%
D_lc, tr_lc = spams.nmf(np.asfortranarray(ycr_lc.T), K=2, return_lasso=True)
plt.figure()
plt.plot(tr_lc.T.toarray())
plt.figure()
plt.imshow(D_lc[:, 0].reshape(mcr.shape[1:], order='F'), cmap='gray')
#%%
# note: spams.trainDL returns only the dictionary unless return_model=True
D = spams.trainDL(np.asfortranarray(ycr.T), K=2, D=D_lc, lambda1=0)

#%%
D, tr = spams.nnsc(np.asfortranarray(ycr.T), K=2, return_lasso=True, lambda1=0)
#%%
D, tr = spams.nmf(np.asfortranarray(np.abs(ycr.T)), K=2, return_lasso=True)
#%%
D, tr = spams.nnsc(np.asfortranarray(ycr.T), K=2, return_lasso=True, lambda1=1)
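
This exploratory snippet likewise omits its imports; a hedged header, assuming the SPAMS sparse-modeling library for the nmf/trainDL/nnsc calls:

import matplotlib.pyplot as plt
import numpy as np
import spams  # SPAMS sparse-modeling toolbox

import caiman as cm
from caiman.summary_images import local_correlations_movie_offline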
Example #4
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %% start a cluster

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

    # %% set up some parameters
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]
    # file(s) to be analyzed
    is_patches = True  # flag for processing in patches or not
    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 4  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = 30  # number of neurons expected (in the whole FOV)

    gSig = [6, 6]  # expected half size of neurons
    merge_thresh = 0.80  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    params_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'rf': rf,
        'stride': stride,
        'K': K,
        'gSig': gSig,
        'merge_thr': merge_thresh,
        'p': p,
        'nb': gnb
    }

    opts = params.CNMFParams(params_dict=params_dict)
    # %% Now RUN CaImAn Batch (CNMF)
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit_file()

    # %% plot contour plots of components
    Cns = local_correlations_movie_offline(fnames[0],
                                           remove_baseline=True,
                                           swap_dim=False,
                                           window=1000,
                                           stride=1000,
                                           winSize_baseline=100,
                                           quantil_min_baseline=10,
                                           dview=dview)
    Cn = Cns.max(axis=0)
    cnm.estimates.plot_contours(img=Cn)

    # %% load memory mapped file
    Yr, dims, T = cm.load_memmap(cnm.mmap_file)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    # %% refit
    cnm2 = cnm.refit(images, dview=dview)

    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)

    min_SNR = 2  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.85  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.99  # if cnn classifier predicts below this value, reject
    cnn_lowest = 0.1  # neurons with cnn probability lower than this value are rejected

    cnm2.params.set(
        'quality', {
            'min_SNR': min_SNR,
            'rval_thr': rval_thr,
            'use_cnn': use_cnn,
            'min_cnn_thr': min_cnn_thr,
            'cnn_lowest': cnn_lowest
        })

    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

    # %% visualize selected and rejected components
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
    # %% visualize selected components
    cnm2.estimates.view_components(images,
                                   idx=cnm2.estimates.idx_components,
                                   img=Cn)
    #%% only select high quality components (destructive)
    # cnm2.estimates.select_components(use_object=True)
    # cnm2.estimates.plot_contours(img=Cn)
    #%% save results
    cnm2.estimates.Cn = Cn
    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5')

    # %% play movie with results (original, reconstructed, amplified residual)
    cnm2.estimates.play_movie(images, magnification=4)

    # %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
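Results written by cnm2.save(...) can be restored in a later session; a sketch assuming the load_CNMF helper shipped with CaImAn (the path is a hypothetical placeholder for whatever cnm2.save wrote):

from caiman.source_extraction.cnmf.cnmf import load_CNMF

cnm2 = load_CNMF('/path/to/saved_results.hdf5')  # hypothetical path
cnm2.estimates.plot_contours(img=cnm2.estimates.Cn)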
Example #5
def run_volpy(fnames,
              options=None,
              do_motion_correction=True,
              do_memory_mapping=True,
              fr=400):
    #pass  # For compatibility between running under Spyder and the CLI

    # %%  Load demo movie and ROIs
    file_dir = os.path.split(fnames)[0]
    path_ROIs = [file for file in os.listdir(file_dir) if 'ROIs_gt' in file]
    if len(path_ROIs) > 0:
        path_ROIs = path_ROIs[0]
    #path_ROIs = '/home/nel/NEL-LAB Dropbox/NEL/Datasets/voltage_lin/peyman_golshani/ROIs.hdf5'

#%% dataset dependent parameters
    # fr (sample rate of the movie) comes from the function argument

    # motion correction parameters
    pw_rigid = False  # flag for pw-rigid motion correction
    gSig_filt = (3, 3)  # size of filter, in general same as gSig (see below);
    # change this one if the algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (patch size is strides + overlaps)
    max_deviation_rigid = 3  # maximum deviation allowed for patch with respect to rigid shifts
    border_nan = 'copy'

    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = volparams(params_dict=opts_dict)

    # %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # Run correction
    # do_motion_correction comes from the function argument
    if do_motion_correction:
        mc.motion_correct(save_movie=True)
    else:
        mc_list = [
            file for file in os.listdir(file_dir)
            if (os.path.splitext(os.path.split(fnames)[-1])[0] in file
                and '.mmap' in file)
        ]
        mc.mmap_file = [os.path.join(file_dir, mc_list[0])]
        print(f'reuse previously saved motion corrected file:{mc.mmap_file}')

# %% MEMORY MAPPING
    # do_memory_mapping comes from the function argument
    if do_memory_mapping:
        border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
        # you can include the boundaries of the FOV if you used the 'copy' option
        # during motion correction, although be careful about the components near
        # the boundaries

        # memory map the file in order 'C'
        fname_new = cm.save_memmap_join(
            mc.mmap_file,
            base_name='memmap_' +
            os.path.splitext(os.path.split(fnames)[-1])[0],
            add_to_mov=border_to_0,
            dview=dview)  # exclude border
    else:
        mmap_list = [
            file for file in os.listdir(file_dir)
            if ('memmap_' +
                os.path.splitext(os.path.split(fnames)[-1])[0]) in file
        ]
        fname_new = os.path.join(file_dir, mmap_list[0])
        print(f'reuse previously saved memory mapping file:{fname_new}')

# %% SEGMENTATION
# create summary images
    img = mean_image(mc.mmap_file[0], window=1000, dview=dview)
    img = (img - np.mean(img)) / np.std(img)

    gaussian_blur = False  # Use gaussian blur when there is too much noise in the video
    Cn = local_correlations_movie_offline(mc.mmap_file[0],
                                          fr=fr,
                                          window=fr * 4,
                                          stride=fr * 4,
                                          winSize_baseline=fr,
                                          remove_baseline=True,
                                          gaussian_blur=gaussian_blur,
                                          dview=dview).max(axis=0)
    img_corr = (Cn - np.mean(Cn)) / np.std(Cn)
    summary_images = np.stack([img, img, img_corr], axis=0).astype(np.float32)
    # ! save summary image, it is used in GUI
    cm.movie(summary_images).save(fnames[:-5] + '_summary_images.tif')
    #plt.imshow(summary_images[0])
    #%% three methods for segmentation
    methods_list = [
        'manual_annotation',  # requires a user-prepared annotated dataset in the same format as the demo ROIs
        'gui_annotation',     # use the GUI to manually annotate neurons (still under development)
        'maskrcnn'            # a convolutional network trained to find neurons in summary images
    ]
    method = methods_list[0]
    if method == 'manual_annotation':
        #with h5py.File(path_ROIs, 'r') as fl:
        #    ROIs = fl['mov'][()]
        ROIs = np.load(os.path.join(file_dir, path_ROIs))

    elif method == 'gui_annotation':
        # run volpy_gui file in the caiman/source_extraction/volpy folder
        # load the summary images you have just saved
        # save the ROIs to the video folder
        path_ROIs = caiman_datadir() + '/example_movies/volpy/gui_roi.hdf5'
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]

    elif method == 'maskrcnn':  # important!! make sure Keras is installed before using Mask R-CNN
        weights_path = download_model('mask_rcnn')
        # a locally trained weights file can be substituted here, e.g.:
        # weights_path = '/home/nel/Code/NEL_LAB/Mask_RCNN/logs/neurons20200824T1032/mask_rcnn_neurons_0040.h5'
        ROIs = utils.mrcnn_inference(
            img=summary_images.transpose([1, 2, 0]),
            size_range=[5, 100],
            weights_path=weights_path,
            display_result=True
        )  # size parameter decides size range of masks to be selected
        #np.save(os.path.join(file_dir, 'ROIs'), ROIs)
# %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False,
                                                     maxtasksperchild=1)

    # %% parameters for trace denoising and spike extraction
    # ROIs: regions of interest from the segmentation step above
    index = list(range(len(ROIs)))  # index of neurons
    weights = None  # reuse spatial weights

    context_size = 35  # number of pixels surrounding the ROI to censor from the background PCA
    flip_signal = True  # Important!! Flip signal or not, True for Voltron indicator, False for others
    hp_freq_pb = 1 / 3  # parameter for high-pass filter to remove photobleaching
    threshold_method = 'adaptive_threshold'  # 'simple' or 'adaptive_threshold'
    min_spikes = 30  # minimum number of spikes to be found
    threshold = 4  # threshold for finding spikes; increase it to find fewer spikes
    do_plot = False  # plot detail of spikes, template for the last iteration
    ridge_bg = 0.01  # ridge regression regularizer strength for background removal; a larger value means stronger regularization
    sub_freq = 20  # frequency for subthreshold extraction
    weight_update = 'ridge'  # 'ridge' or 'NMF' for weight update
    n_iter = 2  # number of iterations alternating between estimating spike times and spatial filters

    opts_dict = {
        'fnames': fname_new,
        'ROIs': ROIs,
        'index': index,
        'weights': weights,
        'context_size': context_size,
        'flip_signal': flip_signal,
        'hp_freq_pb': hp_freq_pb,
        'threshold_method': threshold_method,
        'min_spikes': min_spikes,
        'threshold': threshold,
        'do_plot': do_plot,
        'ridge_bg': ridge_bg,
        'sub_freq': sub_freq,
        'weight_update': weight_update,
        'n_iter': n_iter
    }

    opts.change_params(params_dict=opts_dict)

    if options is not None:
        print('using external options')
        opts.change_params(params_dict=options)
    else:
        print('not using external options')

#%% TRACE DENOISING AND SPIKE DETECTION
    vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
    vpy.fit(n_processes=n_processes, dview=dview)

    #%% visualization
    display_images = False
    if display_images:
        print(np.where(
            vpy.estimates['locality'])[0])  # neurons that pass locality test
        idx = np.where(vpy.estimates['locality'] > 0)[0]
        utils.view_components(vpy.estimates, img_corr, idx)

#%% reconstructed movie
# note that negative spatial weights are cut off
    if display_images:
        mv_all = utils.reconstructed_movie(vpy.estimates,
                                           fnames=mc.mmap_file,
                                           idx=idx,
                                           scope=(0, 1000),
                                           flip_signal=flip_signal)
        mv_all.play(fr=40)

#%% save the result in .npy format
    save_result = True
    if save_result:
        vpy.estimates['ROIs'] = ROIs
        save_name = f'volpy_{os.path.split(fnames)[1][:-5]}_{opts.volspike["threshold_method"]}_{opts.volspike["threshold"]}_{opts.volspike["weight_update"]}_bg_{opts.volspike["ridge_bg"]}'
        np.save(os.path.join(file_dir, save_name), vpy.estimates)

# %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
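A hedged call sketch for run_volpy; the movie path is hypothetical, and the option keys come from the opts_dict above (they override the defaults through the options argument):

fnames = '/path/to/voltage_movie.hdf5'  # hypothetical dataset with a matching *ROIs_gt* file beside it
run_volpy(fnames,
          options={'threshold_method': 'adaptive_threshold', 'min_spikes': 20},
          do_motion_correction=True,
          do_memory_mapping=True,
          fr=400)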
Example #6
    elif file in files[HPC_set]:
        fr = 1000
        transpose = False
        flip = False
        gaussian_blur = False

    X = {}
    filename = root_dir + '/raw_data/' + file
    mov = cm.load(filename, fr=fr, in_memory=True)
    if transpose:
        mov = mov.transpose([0, 2, 1])
    mov = mov[1000:, :, :]
    plt.figure(); plt.plot(mov.mean((1,2))); plt.show()
    
    corr_img = local_correlations_movie_offline(filename, fr=fr, window=mov.shape[0], 
                                          stride=mov.shape[0], winSize_baseline=fr, 
                                          remove_baseline=True, gaussian_blur=gaussian_blur,
                                          dview=dview).max(axis=0)
    
    mov_rb = mov.removeBL(fr)
    plt.figure(); plt.plot(mov_rb.mean((1,2))); plt.show()

    if transpose:
        corr_img = corr_img.T
    X['mean'] = np.mean(mov, axis=0)
    X['corr'] = corr_img
    X['std'] = np.std(mov_rb, 0)
    X['median'] = (mov_rb).bin_median()
    
    if flip:
        X['max'] = np.max(-mov_rb, 0)
    else:
        X['max'] = np.max(mov_rb, 0)  # completed from context: no flip, use the baseline-removed movie

def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %%  Load demo movie and ROIs
    fnames = download_demo(
        'demo_voltage_imaging.hdf5',
        'volpy')  # file path to movie file (will download if not present)
    path_ROIs = download_demo(
        'demo_voltage_imaging_ROIs.hdf5',
        'volpy')  # file path to ROIs file (will download if not present)
    file_dir = os.path.split(fnames)[0]

    #%% dataset dependent parameters
    fr = 400  # sample rate of the movie

    # motion correction parameters
    pw_rigid = False  # flag for pw-rigid motion correction
    gSig_filt = (3, 3)  # size of filter, in general same as gSig (see below);
    # change this one if the algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (patch size is strides + overlaps)
    max_deviation_rigid = 3  # maximum deviation allowed for patch with respect to rigid shifts
    border_nan = 'copy'

    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = volparams(params_dict=opts_dict)

    # %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the movie press q
    display_images = False

    if display_images:
        m_orig = cm.load(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=40, magnification=4)

# %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # Run correction
    do_motion_correction = True
    if do_motion_correction:
        mc.motion_correct(save_movie=True)
    else:
        mc_list = [
            file for file in os.listdir(file_dir)
            if (os.path.splitext(os.path.split(fnames)[-1])[0] in file
                and '.mmap' in file)
        ]
        mc.mmap_file = [os.path.join(file_dir, mc_list[0])]
        print(f'reuse previously saved motion corrected file:{mc.mmap_file}')

# %% compare with original movie
    if display_images:
        m_orig = cm.load(fnames)
        m_rig = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate(
            [m_orig.resize(1, 1, ds_ratio),
             m_rig.resize(1, 1, ds_ratio)],
            axis=2)
        moviehandle.play(fr=40, q_max=99.5, magnification=4)  # press q to exit

# %% MEMORY MAPPING
    do_memory_mapping = True
    if do_memory_mapping:
        border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
        # you can include the boundaries of the FOV if you used the 'copy' option
        # during motion correction, although be careful about the components near
        # the boundaries

        # memory map the file in order 'C'
        fname_new = cm.save_memmap_join(
            mc.mmap_file,
            base_name='memmap_' +
            os.path.splitext(os.path.split(fnames)[-1])[0],
            add_to_mov=border_to_0,
            dview=dview)  # exclude border
    else:
        mmap_list = [
            file for file in os.listdir(file_dir)
            if ('memmap_' +
                os.path.splitext(os.path.split(fnames)[-1])[0]) in file
        ]
        fname_new = os.path.join(file_dir, mmap_list[0])
        print(f'reuse previously saved memory mapping file:{fname_new}')

# %% SEGMENTATION
# create summary images
    img = mean_image(mc.mmap_file[0], window=1000, dview=dview)
    img = (img - np.mean(img)) / np.std(img)

    gaussian_blur = False  # Use gaussian blur when there is too much noise in the video
    Cn = local_correlations_movie_offline(mc.mmap_file[0],
                                          fr=fr,
                                          window=fr * 4,
                                          stride=fr * 4,
                                          winSize_baseline=fr,
                                          remove_baseline=True,
                                          gaussian_blur=gaussian_blur,
                                          dview=dview).max(axis=0)
    img_corr = (Cn - np.mean(Cn)) / np.std(Cn)
    summary_images = np.stack([img, img, img_corr], axis=0).astype(np.float32)
    # save summary images which are used in the VolPy GUI
    cm.movie(summary_images).save(fnames[:-5] + '_summary_images.tif')
    fig, axs = plt.subplots(1, 2)
    axs[0].imshow(summary_images[0])
    axs[1].imshow(summary_images[2])
    axs[0].set_title('mean image')
    axs[1].set_title('corr image')

    #%% methods for segmentation
    methods_list = [
        'manual_annotation',  # requires a prepared annotated dataset in the same format as demo_voltage_imaging_ROIs.hdf5
        'maskrcnn',           # Mask R-CNN is a convolutional neural network trained to detect neurons in summary images
        'gui_annotation'      # use the VolPy GUI to correct Mask R-CNN outputs or annotate new datasets
    ]

    method = methods_list[0]
    if method == 'manual_annotation':
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]

    elif method == 'maskrcnn':  # important!! make sure Keras is installed before using Mask R-CNN
        weights_path = download_model(
            'mask_rcnn'
        )  # also make sure the new weights are downloaded (updated on Dec 1st, 2020)
        ROIs = utils.mrcnn_inference(
            img=summary_images.transpose([1, 2, 0]),
            size_range=[5, 22],
            weights_path=weights_path,
            display_result=True
        )  # size parameter decides size range of masks to be selected
        cm.movie(ROIs).save(fnames[:-5] + '_mrcnn_ROIs.hdf5')

    elif method == 'gui_annotation':
        # run volpy_gui.py file in the caiman/source_extraction/volpy folder
        gui_ROIs = caiman_datadir() + '/example_movies/volpy/gui_roi.hdf5'
        with h5py.File(gui_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]

    fig, axs = plt.subplots(1, 2)
    axs[0].imshow(summary_images[0])
    axs[1].imshow(ROIs.sum(0))
    axs[0].set_title('mean image')
    axs[1].set_title('masks')

    # %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False,
                                                     maxtasksperchild=1)

    # %% parameters for trace denoising and spike extraction
    # ROIs: regions of interest from the segmentation step above
    index = list(range(len(ROIs)))  # index of neurons
    weights = None  # reuse spatial weights

    context_size = 35  # number of pixels surrounding the ROI to censor from the background PCA
    visualize_ROI = False  # whether to visualize the region of interest inside the context region
    flip_signal = True  # Important!! Flip signal or not, True for Voltron indicator, False for others
    hp_freq_pb = 1 / 3  # parameter for high-pass filter to remove photobleaching
    clip = 100  # maximum number of spikes to form spike template
    threshold_method = 'adaptive_threshold'  # adaptive_threshold or simple
    min_spikes = 10  # minimum number of spikes to be found
    pnorm = 0.5  # controls how many spikes are selected by the adaptive threshold method
    threshold = 3  # threshold for finding spikes, used only by the simple threshold method; increase it to find fewer spikes
    do_plot = False  # plot detail of spikes, template for the last iteration
    ridge_bg = 0.01  # ridge regression regularizer strength for background removal; a larger value means stronger regularization
    sub_freq = 20  # frequency for subthreshold extraction
    weight_update = 'ridge'  # ridge or NMF for weight update
    n_iter = 2  # number of iterations alternating between estimating spike times and spatial filters

    opts_dict = {
        'fnames': fname_new,
        'ROIs': ROIs,
        'index': index,
        'weights': weights,
        'context_size': context_size,
        'visualize_ROI': visualize_ROI,
        'flip_signal': flip_signal,
        'hp_freq_pb': hp_freq_pb,
        'clip': clip,
        'threshold_method': threshold_method,
        'min_spikes': min_spikes,
        'pnorm': pnorm,
        'threshold': threshold,
        'do_plot': do_plot,
        'ridge_bg': ridge_bg,
        'sub_freq': sub_freq,
        'weight_update': weight_update,
        'n_iter': n_iter
    }

    opts.change_params(params_dict=opts_dict)

    #%% TRACE DENOISING AND SPIKE DETECTION
    vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
    vpy.fit(n_processes=n_processes, dview=dview)

    #%% visualization
    display_images = True
    if display_images:
        print(np.where(
            vpy.estimates['locality'])[0])  # neurons that pass locality test
        idx = np.where(vpy.estimates['locality'] > 0)[0]
        utils.view_components(vpy.estimates, img_corr, idx)

#%% reconstructed movie
# note that negative spatial weights are cut off
    if display_images:
        mv_all = utils.reconstructed_movie(vpy.estimates.copy(),
                                           fnames=mc.mmap_file,
                                           idx=idx,
                                           scope=(0, 1000),
                                           flip_signal=flip_signal)
        mv_all.play(fr=40)

#%% save the result in .npy format
    save_result = True
    if save_result:
        vpy.estimates['ROIs'] = ROIs
        vpy.estimates['params'] = opts
        save_name = f'volpy_{os.path.split(fnames)[1][:-5]}_{threshold_method}'
        np.save(os.path.join(file_dir, save_name), vpy.estimates)

# %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
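Because the estimates dictionary is written with np.save, it can be restored later via allow_pickle; a sketch reusing file_dir and save_name as defined above:

estimates = np.load(os.path.join(file_dir, save_name + '.npy'),
                    allow_pickle=True).item()
print(estimates.keys())  # includes the 'ROIs' and 'params' entries saved above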
Example #8
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %%  Load demo movie and ROIs
    fnames = download_demo(
        'demo_voltage_imaging.hdf5',
        'volpy')  # file path to movie file (will download if not present)
    path_ROIs = download_demo(
        'demo_voltage_imaging_ROIs.hdf5',
        'volpy')  # file path to ROIs file (will download if not present)

    #%% dataset dependent parameters
    # dataset dependent parameters
    fr = 400  # sample rate of the movie

    # motion correction parameters
    pw_rigid = False  # flag for pw-rigid motion correction
    gSig_filt = (3, 3)  # size of filter, in general same as gSig (see below);
    # change this one if the algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (patch size is strides + overlaps)
    max_deviation_rigid = 3  # maximum deviation allowed for patch with respect to rigid shifts
    border_nan = 'copy'

    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = volparams(params_dict=opts_dict)

    # %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the movie press q
    display_images = False

    if display_images:
        m_orig = cm.load(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=40, magnification=6)

# %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # Run correction
    mc.motion_correct(save_movie=True)

    # %% compare with original movie
    if display_images:
        m_orig = cm.load(fnames)
        m_rig = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([
            m_orig.resize(1, 1, ds_ratio) - mc.min_mov * mc.nonneg_movie,
            m_rig.resize(1, 1, ds_ratio)
        ],
                                     axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=4)  # press q to exit

# %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap_join(mc.mmap_file,
                                    base_name='memmap_',
                                    add_to_mov=border_to_0,
                                    dview=dview)  # exclude border

    # %% SEGMENTATION
    # create summary images
    img = mean_image(mc.mmap_file[0], window=1000, dview=dview)
    img = (img - np.mean(img)) / np.std(img)

    gaussian_blur = False  # use gaussian blur when the quality of the corr image (Cn) is bad
    Cn = local_correlations_movie_offline(mc.mmap_file[0],
                                          fr=fr,
                                          window=fr * 4,
                                          stride=fr * 4,
                                          winSize_baseline=fr,
                                          remove_baseline=True,
                                          gaussian_blur=gaussian_blur,
                                          dview=dview).max(axis=0)
    img_corr = (Cn - np.mean(Cn)) / np.std(Cn)
    summary_image = np.stack([img, img, img_corr], axis=2).astype(np.float32)

    #%% three methods for segmentation
    methods_list = [
        'manual_annotation',  # requires a user-prepared annotated dataset in the same format as the demo ROIs
        'quick_annotation',   # annotates the data through a simple interface in Python
        'maskrcnn'            # a convolutional network trained to find neurons in summary images
    ]
    method = methods_list[0]
    if method == 'manual_annotation':
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]

    elif method == 'quick_annotation':
        ROIs = utils.quick_annotation(img, min_radius=4, max_radius=8)

    elif method == 'maskrcnn':  # important!! make sure Keras is installed before using Mask R-CNN
        weights_path = download_model('mask_rcnn')
        ROIs = utils.mrcnn_inference(img=summary_image,
                                     weights_path=weights_path,
                                     display_result=True)

# %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False,
                                                     maxtasksperchild=1)

    # %% parameters for trace denoising and spike extraction
    # ROIs: regions of interest from the segmentation step above
    index = list(range(len(ROIs)))  # index of neurons
    weights = None  # reuse spatial weights

    context_size = 35  # number of pixels surrounding the ROI to censor from the background PCA
    flip_signal = True  # Important!! Flip signal or not, True for Voltron indicator, False for others
    hp_freq_pb = 1 / 3  # parameter for high-pass filter to remove photobleaching
    threshold_method = 'simple'  # 'simple' or 'adaptive_threshold'
    min_spikes = 10  # minimum number of spikes to be found
    threshold = 3.5  # threshold for finding spikes; increase it to find fewer spikes
    do_plot = False  # plot detail of spikes, template for the last iteration
    ridge_bg = 0.001  # ridge regression regularizer strength for background removal
    sub_freq = 20  # frequency for subthreshold extraction
    weight_update = 'ridge'  # 'ridge' or 'NMF' for weight update

    opts_dict = {
        'fnames': fname_new,
        'ROIs': ROIs,
        'index': index,
        'weights': weights,
        'context_size': context_size,
        'flip_signal': flip_signal,
        'hp_freq_pb': hp_freq_pb,
        'threshold_method': threshold_method,
        'min_spikes': min_spikes,
        'threshold': threshold,
        'do_plot': do_plot,
        'ridge_bg': ridge_bg,
        'sub_freq': sub_freq,
        'weight_update': weight_update
    }

    opts.change_params(params_dict=opts_dict)

    #%% TRACE DENOISING AND SPIKE DETECTION
    vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
    vpy.fit(n_processes=n_processes, dview=dview)

    #%% visualization
    if display_images:
        print(np.where(
            vpy.estimates['locality'])[0])  # neurons that pass locality test
        idx = np.where(vpy.estimates['locality'] > 0)[0]
        utils.view_components(vpy.estimates, img_corr, idx)

#%% reconstructed movie
# note that negative spatial weights are cut off
    if display_images:
        mv_all = utils.reconstructed_movie(vpy.estimates,
                                           fnames=mc.mmap_file,
                                           idx=idx,
                                           scope=(0, 1000),
                                           flip_signal=flip_signal)
        mv_all.play(fr=40)

# %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
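This demo uses the simple thresholding path; switching to the adaptive threshold only requires a parameter change before refitting. A sketch (run while the cluster is still up; both parameter values mirror Example #6):

opts.change_params(params_dict={'threshold_method': 'adaptive_threshold',
                                'min_spikes': 10})
vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
vpy.fit(n_processes=n_processes, dview=dview)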
Example #9
#%% pick a dataset (only the last assignment takes effect)
fname = '/Users/agiovann/NEL-LAB Dropbox/NEL/Papers/VolPy/Marton/456462_Cell_3_40x_1xtube_10A3.hdf5'
fname = '/Users/agiovann/NEL-LAB Dropbox/NEL/Papers/VolPy/Marton/456462_Cell_5_40x_1xtube_10A6.hdf5'
fname = '/Users/agiovann/NEL-LAB Dropbox/NEL/Papers/VolPy/Marton/video_small_region/462149_Cell_1_40x_1xtube_10A1.tif'
#%%
mcr = cm.load(fname)[:]

mcr = (-mcr).removeBL()

ycr = mcr.to_2D()

ycr = ycr - ycr.min()
#%%
mcr_lc = local_correlations_movie_offline(fname,
                                          window=100,
                                          stride=10,
                                          dview=dview,
                                          Tot_frames=10000)
ycr_lc = mcr_lc.to_2D()
#%%
immg = mcr.mean(axis=(1, 2))
immg = (immg - np.min(immg)) / (np.max(immg) - np.min(immg))
plt.plot(mcr_lc.mean(axis=(1, 2)))
plt.plot(dict1['v_sg'][100:] * 5)
plt.plot(immg[100:])
#%%
D_lc, tr_lc = spams.nmf(np.asfortranarray(ycr_lc.T), K=2, return_lasso=True)
plt.figure()
plt.plot(tr_lc.T.toarray())
plt.figure()
plt.imshow(D_lc[:, 0].reshape(mcr.shape[1:], order='F'), cmap='gray')
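
Only the first spatial factor is displayed above; a small sketch to inspect every factor returned by spams.nmf:

for k in range(D_lc.shape[1]):
    plt.figure()
    plt.imshow(D_lc[:, k].reshape(mcr.shape[1:], order='F'), cmap='gray')
    plt.title(f'spatial factor {k}')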